From 847c0b9644ddcfae39c4f96027f1b46177735ab6 Mon Sep 17 00:00:00 2001
From: Chenyu Li
Date: Tue, 25 Oct 2022 10:41:57 -0700
Subject: [PATCH 01/54] Tracking works with Click (#5972)

Co-authored-by: Ian Knox
---
 core/dbt/cli/flags.py                         |  23 ++-
 core/dbt/cli/main.py                          |  13 +-
 core/dbt/flags.py                             |  98 ++++-----
 core/dbt/main.py                              |  25 +--
 core/dbt/task/deps.py                         |   1 -
 core/dbt/tracking.py                          | 193 ++++++++----------
 test/unit/test_tracking.py                    |  21 +-
 .../context_methods/test_builtin_functions.py |   2 +-
 8 files changed, 187 insertions(+), 189 deletions(-)

diff --git a/core/dbt/cli/flags.py b/core/dbt/cli/flags.py
index 3593a69de84..873cdfdfa40 100644
--- a/core/dbt/cli/flags.py
+++ b/core/dbt/cli/flags.py
@@ -1,10 +1,12 @@
 # TODO Move this to /core/dbt/flags.py when we're ready to break things
 import os
+import sys
 from dataclasses import dataclass
+from importlib import import_module
 from multiprocessing import get_context
 from pprint import pformat as pf
 
-from click import get_current_context
+from click import Context, get_current_context
 
 if os.name != "nt":
     # https://bugs.python.org/issue41567
@@ -13,7 +15,7 @@
 
 @dataclass(frozen=True)
 class Flags:
-    def __init__(self, ctx=None) -> None:
+    def __init__(self, ctx: Context = None) -> None:
 
         if ctx is None:
             ctx = get_current_context()
@@ -32,13 +34,26 @@ def assign_params(ctx):
 
         assign_params(ctx)
 
+        # Get the invoked command flags
+        if hasattr(ctx, "invoked_subcommand") and ctx.invoked_subcommand is not None:
+            invoked_subcommand = getattr(import_module("dbt.cli.main"), ctx.invoked_subcommand)
+            invoked_subcommand.allow_extra_args = True
+            invoked_subcommand.ignore_unknown_options = True
+            invoked_subcommand_ctx = invoked_subcommand.make_context(None, sys.argv)
+            assign_params(invoked_subcommand_ctx)
+
         # Hard coded flags
         object.__setattr__(self, "WHICH", ctx.info_name)
         object.__setattr__(self, "MP_CONTEXT", get_context("spawn"))
 
         # Support console DO NOT TRACK initiative
-        if os.getenv("DO_NOT_TRACK", "").lower() in ("1", "t", "true", "y", "yes"):
-            object.__setattr__(self, "ANONYMOUS_USAGE_STATS", False)
+        object.__setattr__(
+            self,
+            "ANONYMOUS_USAGE_STATS",
+            False
+            if os.getenv("DO_NOT_TRACK", "").lower() in ("1", "t", "true", "y", "yes")
+            else True,
+        )
 
     def __str__(self) -> str:
         return str(pf(self.__dict__))
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 3f3b94ea9e3..739ada4a841 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -7,6 +7,7 @@
 from dbt.cli import params as p
 from dbt.cli.flags import Flags
 from dbt.profiler import profiler
+from dbt.tracking import initialize_from_flags, track_run
 
 
 def cli_runner():
@@ -52,17 +53,21 @@ def cli(ctx, **kwargs):
     """An ELT tool for managing your SQL transformations and data models.
For more documentation on these commands, visit: docs.getdbt.com """ - incomplete_flags = Flags() + flags = Flags() + + # Tracking + initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR) + ctx.with_resource(track_run(run_command=ctx.invoked_subcommand)) # Profiling - if incomplete_flags.RECORD_TIMING_INFO: - ctx.with_resource(profiler(enable=True, outfile=incomplete_flags.RECORD_TIMING_INFO)) + if flags.RECORD_TIMING_INFO: + ctx.with_resource(profiler(enable=True, outfile=flags.RECORD_TIMING_INFO)) # Adapter management ctx.with_resource(adapter_management()) # Version info - if incomplete_flags.VERSION: + if flags.VERSION: click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}") return else: diff --git a/core/dbt/flags.py b/core/dbt/flags.py index 974aa50620c..bff51c2b343 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -24,27 +24,28 @@ STORE_FAILURES = False # subcommand # Global CLI commands -USE_EXPERIMENTAL_PARSER = None -STATIC_PARSER = None -WARN_ERROR = None -WRITE_JSON = None -PARTIAL_PARSE = None -USE_COLORS = None +ANONYMOUS_USAGE_STATS = None +CACHE_SELECTED_ONLY = None DEBUG = None -LOG_FORMAT = None -VERSION_CHECK = None +EVENT_BUFFER_SIZE = 100000 FAIL_FAST = None -SEND_ANONYMOUS_USAGE_STATS = None -PRINTER_WIDTH = 80 -WHICH = None INDIRECT_SELECTION = None LOG_CACHE_EVENTS = None -EVENT_BUFFER_SIZE = 100000 -QUIET = None +LOG_FORMAT = None +LOG_PATH = None NO_PRINT = None -CACHE_SELECTED_ONLY = None +PARTIAL_PARSE = None +PRINTER_WIDTH = 80 +QUIET = None +SEND_ANONYMOUS_USAGE_STATS = None +STATIC_PARSER = None TARGET_PATH = None -LOG_PATH = None +USE_COLORS = None +USE_EXPERIMENTAL_PARSER = None +VERSION_CHECK = None +WARN_ERROR = None +WHICH = None +WRITE_JSON = None _NON_BOOLEAN_FLAGS = [ "LOG_FORMAT", @@ -63,27 +64,28 @@ # CLI args, environment variables, and user_config (profiles.yml). 
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR flag_defaults = { - "USE_EXPERIMENTAL_PARSER": False, - "STATIC_PARSER": True, - "WARN_ERROR": False, - "WRITE_JSON": True, - "PARTIAL_PARSE": True, - "USE_COLORS": True, - "PROFILES_DIR": DEFAULT_PROFILES_DIR, + "ANONYMOUS_USAGE_STATS": True, + "CACHE_SELECTED_ONLY": False, "DEBUG": False, - "LOG_FORMAT": None, - "VERSION_CHECK": True, + "EVENT_BUFFER_SIZE": 100000, "FAIL_FAST": False, - "SEND_ANONYMOUS_USAGE_STATS": True, - "PRINTER_WIDTH": 80, "INDIRECT_SELECTION": "eager", "LOG_CACHE_EVENTS": False, - "EVENT_BUFFER_SIZE": 100000, - "QUIET": False, + "LOG_FORMAT": None, + "LOG_PATH": None, "NO_PRINT": False, - "CACHE_SELECTED_ONLY": False, + "PARTIAL_PARSE": True, + "PRINTER_WIDTH": 80, + "PROFILES_DIR": DEFAULT_PROFILES_DIR, + "QUIET": False, + "SEND_ANONYMOUS_USAGE_STATS": True, + "STATIC_PARSER": True, "TARGET_PATH": None, - "LOG_PATH": None, + "USE_COLORS": True, + "USE_EXPERIMENTAL_PARSER": False, + "VERSION_CHECK": True, + "WARN_ERROR": False, + "WRITE_JSON": True, } @@ -132,7 +134,7 @@ def set_from_args(args, user_config): # black insists in putting them all on one line global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT - global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS + global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, ANONYMOUS_USAGE_STATS global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET, NO_PRINT, CACHE_SELECTED_ONLY global TARGET_PATH, LOG_PATH @@ -143,39 +145,42 @@ def set_from_args(args, user_config): WHICH = getattr(args, "which", WHICH) # global cli flags with env var and user_config alternatives - USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config) - STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config) - WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config) - WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config) - PARTIAL_PARSE = get_flag_value("PARTIAL_PARSE", args, user_config) - USE_COLORS = get_flag_value("USE_COLORS", args, user_config) - PROFILES_DIR = get_flag_value("PROFILES_DIR", args, user_config) + ANONYMOUS_USAGE_STATS = get_flag_value("ANONYMOUS_USAGE_STATS", args, user_config) + CACHE_SELECTED_ONLY = get_flag_value("CACHE_SELECTED_ONLY", args, user_config) DEBUG = get_flag_value("DEBUG", args, user_config) - LOG_FORMAT = get_flag_value("LOG_FORMAT", args, user_config) - VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config) + EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config) FAIL_FAST = get_flag_value("FAIL_FAST", args, user_config) - SEND_ANONYMOUS_USAGE_STATS = get_flag_value("SEND_ANONYMOUS_USAGE_STATS", args, user_config) - PRINTER_WIDTH = get_flag_value("PRINTER_WIDTH", args, user_config) INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config) LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config) - EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config) - QUIET = get_flag_value("QUIET", args, user_config) + LOG_FORMAT = get_flag_value("LOG_FORMAT", args, user_config) + LOG_PATH = get_flag_value("LOG_PATH", args, user_config) NO_PRINT = get_flag_value("NO_PRINT", args, user_config) - CACHE_SELECTED_ONLY = get_flag_value("CACHE_SELECTED_ONLY", args, user_config) + PARTIAL_PARSE = 
get_flag_value("PARTIAL_PARSE", args, user_config) + PRINTER_WIDTH = get_flag_value("PRINTER_WIDTH", args, user_config) + PROFILES_DIR = get_flag_value("PROFILES_DIR", args, user_config) + QUIET = get_flag_value("QUIET", args, user_config) + SEND_ANONYMOUS_USAGE_STATS = get_flag_value("SEND_ANONYMOUS_USAGE_STATS", args, user_config) + STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config) TARGET_PATH = get_flag_value("TARGET_PATH", args, user_config) - LOG_PATH = get_flag_value("LOG_PATH", args, user_config) + USE_COLORS = get_flag_value("USE_COLORS", args, user_config) + USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config) + VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config) + WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config) + WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config) _set_overrides_from_env() def _set_overrides_from_env(): global SEND_ANONYMOUS_USAGE_STATS + global ANONYMOUS_USAGE_STATS flag_value = _get_flag_value_from_env("DO_NOT_TRACK") if flag_value is None: return SEND_ANONYMOUS_USAGE_STATS = not flag_value + ANONYMOUS_USAGE_STATS = not flag_value def get_flag_value(flag, args, user_config): @@ -239,6 +244,7 @@ def get_flag_dict(): "version_check": VERSION_CHECK, "fail_fast": FAIL_FAST, "send_anonymous_usage_stats": SEND_ANONYMOUS_USAGE_STATS, + "anonymous_usage_stats": ANONYMOUS_USAGE_STATS, "printer_width": PRINTER_WIDTH, "indirect_selection": INDIRECT_SELECTION, "log_cache_events": LOG_CACHE_EVENTS, diff --git a/core/dbt/main.py b/core/dbt/main.py index 88196fd98ea..f1627555d7a 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -47,8 +47,6 @@ from dbt.exceptions import ( Exception as dbtException, InternalException, - NotImplementedException, - FailedToConnectException, ) @@ -178,7 +176,7 @@ def handle_and_check(args): # Set flags from args, user config, and env vars user_config = read_user_config(flags.PROFILES_DIR) # This is read again later flags.set_from_args(parsed, user_config) - dbt.tracking.initialize_from_flags() + dbt.tracking.initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR) # Set log_format from flags parsed.cls.set_log_format() @@ -201,22 +199,6 @@ def handle_and_check(args): return res, success -@contextmanager -def track_run(task): - dbt.tracking.track_invocation_start(config=task.config, args=task.args) - try: - yield - dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="ok") - except (NotImplementedException, FailedToConnectException) as e: - fire_event(MainEncounteredError(exc=str(e))) - dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="error") - except Exception: - dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="error") - raise - finally: - dbt.tracking.flush() - - def run_from_args(parsed): log_cache_events(getattr(parsed, "log_cache_events", False)) @@ -240,8 +222,9 @@ def run_from_args(parsed): fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state())) results = None - - with track_run(task): + # this has been updated with project_id and adapter info removed, these will be added to new cli work + # being tracked at #6097 and #6098 + with dbt.tracking.track_run(parsed.which): results = task.run() return task, results diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 5e8beff43f3..3898eb28047 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -38,7 +38,6 @@ def track_package_install(self, 
package_name: str, source_type: str, version: st elif source_type != "hub": package_name = dbt.utils.md5(package_name) version = dbt.utils.md5(version) - dbt.tracking.track_package_install( self.config, self.config.args, diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py index 1c852a68649..2a1611edbfb 100644 --- a/core/dbt/tracking.py +++ b/core/dbt/tracking.py @@ -1,33 +1,30 @@ -from typing import Optional +import os +import platform import traceback +import uuid +from contextlib import contextmanager +from datetime import datetime +from typing import Optional -from dbt.clients.yaml_helper import ( # noqa:F401 - yaml, - safe_load, - Loader, - Dumper, -) +import logbook +import pytz +import requests +from snowplow_tracker import Emitter, SelfDescribingJson, Subject, Tracker +from snowplow_tracker import logger as sp_logger + +from dbt import version as dbt_version +from dbt.clients.yaml_helper import safe_load, yaml # noqa:F401 from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import ( DisableTracking, - SendingEvent, - SendEventFailure, FlushEvents, FlushEventsFailure, + MainEncounteredError, + SendEventFailure, + SendingEvent, TrackingInitializeFailure, ) -from dbt import version as dbt_version -from dbt import flags -from snowplow_tracker import Subject, Tracker, Emitter, logger as sp_logger -from snowplow_tracker import SelfDescribingJson -from datetime import datetime - -import logbook -import pytz -import platform -import uuid -import requests -import os +from dbt.exceptions import FailedToConnectException, NotImplementedException sp_logger.setLevel(100) @@ -178,61 +175,6 @@ def get_cookie(self): active_user: Optional[User] = None -def get_run_type(args): - return "regular" - - -def get_invocation_context(user, config, args): - # this adapter might not have implemented the type or unique_field properties - try: - adapter_type = config.credentials.type - except Exception: - adapter_type = None - try: - adapter_unique_id = config.credentials.hashed_unique_field() - except Exception: - adapter_unique_id = None - - return { - "project_id": None if config is None else config.hashed_name(), - "user_id": user.id, - "invocation_id": get_invocation_id(), - "command": args.which, - "options": None, - "version": str(dbt_version.installed), - "run_type": get_run_type(args), - "adapter_type": adapter_type, - "adapter_unique_id": adapter_unique_id, - } - - -def get_invocation_start_context(user, config, args): - data = get_invocation_context(user, config, args) - - start_data = {"progress": "start", "result_type": None, "result": None} - - data.update(start_data) - return SelfDescribingJson(INVOCATION_SPEC, data) - - -def get_invocation_end_context(user, config, args, result_type): - data = get_invocation_context(user, config, args) - - start_data = {"progress": "end", "result_type": result_type, "result": None} - - data.update(start_data) - return SelfDescribingJson(INVOCATION_SPEC, data) - - -def get_invocation_invalid_context(user, config, args, result_type): - data = get_invocation_context(user, config, args) - - start_data = {"progress": "invalid", "result_type": result_type, "result": None} - - data.update(start_data) - return SelfDescribingJson(INVOCATION_SPEC, data) - - def get_platform_context(): data = { "platform": platform.platform(), @@ -268,9 +210,11 @@ def track(user, *args, **kwargs): fire_event(SendEventFailure()) -def track_invocation_start(config=None, args=None): +def track_invocation_start(invocation_context): + data = {"progress": 
"start", "result_type": None, "result": None} + data.update(invocation_context) context = [ - get_invocation_start_context(active_user, config, args), + SelfDescribingJson(INVOCATION_SPEC, data), get_platform_context(), get_dbt_env_context(), ] @@ -326,10 +270,34 @@ def track_rpc_request(options): ) +def get_base_invocation_context(): + assert ( + active_user is not None + ), "initialize active user before calling get_base_invocation_context" + return { + "project_id": None, + "user_id": active_user.id, + "invocation_id": active_user.invocation_id, + "command": None, + "options": None, + "version": str(dbt_version.installed), + "run_type": "regular", + "adapter_type": None, + "adapter_unique_id": None, + } + + def track_package_install(config, args, options): assert active_user is not None, "Cannot track package installs when active user is None" - invocation_data = get_invocation_context(active_user, config, args) + invocation_data = get_base_invocation_context() + + invocation_data.update( + { + "project_id": None if config is None else config.hashed_name(), + "command": args.which, + } + ) context = [ SelfDescribingJson(INVOCATION_SPEC, invocation_data), @@ -362,10 +330,11 @@ def track_deprecation_warn(options): ) -def track_invocation_end(config=None, args=None, result_type=None): - user = active_user +def track_invocation_end(invocation_context, result_type=None): + data = {"progress": "end", "result_type": result_type, "result": None} + data.update(invocation_context) context = [ - get_invocation_end_context(user, config, args, result_type), + SelfDescribingJson(INVOCATION_SPEC, data), get_platform_context(), get_dbt_env_context(), ] @@ -375,14 +344,17 @@ def track_invocation_end(config=None, args=None, result_type=None): track(active_user, category="dbt", action="invocation", label="end", context=context) -def track_invalid_invocation(config=None, args=None, result_type=None): +def track_invalid_invocation(args=None, result_type=None): assert active_user is not None, "Cannot track invalid invocations when active user is None" - - user = active_user - invocation_context = get_invocation_invalid_context(user, config, args, result_type) - - context = [invocation_context, get_platform_context(), get_dbt_env_context()] - + invocation_context = get_base_invocation_context() + invocation_context.update({"command": args.which}) + data = {"progress": "invalid", "result_type": result_type, "result": None} + data.update(invocation_context) + context = [ + SelfDescribingJson(INVOCATION_SPEC, data), + get_platform_context(), + get_dbt_env_context(), + ] track(active_user, category="dbt", action="invocation", label="invalid", context=context) @@ -447,16 +419,6 @@ def do_not_track(): active_user = User(None) -def initialize_tracking(cookie_dir): - global active_user - active_user = User(cookie_dir) - try: - active_user.initialize() - except Exception: - fire_event(TrackingInitializeFailure(exc_info=traceback.format_exc())) - active_user = User(None) - - class InvocationProcessor(logbook.Processor): def __init__(self): super().__init__() @@ -471,9 +433,34 @@ def process(self, record): ) -def initialize_from_flags(): +def initialize_from_flags(anonymous_usage_stats, profiles_dir): # Setting these used to be in UserConfig, but had to be moved here - if flags.SEND_ANONYMOUS_USAGE_STATS: - initialize_tracking(flags.PROFILES_DIR) + global active_user + if anonymous_usage_stats: + active_user = User(profiles_dir) + try: + active_user.initialize() + except Exception: + 
fire_event(TrackingInitializeFailure(exc_info=traceback.format_exc())) + active_user = User(None) else: - do_not_track() + active_user = User(None) + + +@contextmanager +def track_run(run_command=None): + invocation_context = get_base_invocation_context() + invocation_context["command"] = run_command + + track_invocation_start(invocation_context) + try: + yield + track_invocation_end(invocation_context, result_type="ok") + except (NotImplementedException, FailedToConnectException) as e: + fire_event(MainEncounteredError(exc=str(e))) + track_invocation_end(invocation_context, result_type="error") + except Exception: + track_invocation_end(invocation_context, result_type="error") + raise + finally: + flush() diff --git a/test/unit/test_tracking.py b/test/unit/test_tracking.py index a247734d53f..acec367d655 100644 --- a/test/unit/test_tracking.py +++ b/test/unit/test_tracking.py @@ -3,7 +3,7 @@ import shutil import tempfile import unittest - +from unittest.mock import MagicMock class TestTracking(unittest.TestCase): def setUp(self): @@ -16,7 +16,10 @@ def tearDown(self): def test_tracking_initial(self): assert dbt.tracking.active_user is None - dbt.tracking.initialize_tracking(self.tempdir) + dbt.tracking.initialize_from_flags( + True, + self.tempdir + ) assert isinstance(dbt.tracking.active_user, dbt.tracking.User) invocation_id = dbt.tracking.active_user.invocation_id @@ -73,14 +76,14 @@ def test_disable_never_enabled(self): assert isinstance(dbt.tracking.active_user.run_started_at, datetime.datetime) def test_initialize_from_flags(self): - for send_aonymous_usage_stats in [True, False]: + for send_anonymous_usage_stats in [True, False]: with self.subTest( - send_aonymous_usage_stats=send_aonymous_usage_stats + send_anonymous_usage_stats=send_anonymous_usage_stats ): - dbt.tracking.flags.SEND_ANONYMOUS_USAGE_STATS = ( - send_aonymous_usage_stats - ) - dbt.tracking.initialize_from_flags() + dbt.tracking.initialize_from_flags( + send_anonymous_usage_stats, + self.tempdir + ) - assert dbt.tracking.active_user.do_not_track != send_aonymous_usage_stats + assert dbt.tracking.active_user.do_not_track != send_anonymous_usage_stats diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 83043b15a10..68501c146f9 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -112,7 +112,7 @@ def test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = "'send_anonymous_usage_stats': False, 'event_buffer_size': 100000, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'indirect_selection': 'eager'}" + expected = "'send_anonymous_usage_stats': False, 'event_buffer_size': 100000, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'anonymous_usage_stats': True, 'indirect_selection': 'eager'}" assert expected in str(result) def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): From 5efc4aa066d3d9a9599df421101db59494c88b9a Mon Sep 17 00:00:00 2001 
From: Ian Knox <81931810+iknox-fa@users.noreply.github.com> Date: Tue, 25 Oct 2022 13:32:15 -0500 Subject: [PATCH 02/54] Fix CI issue with tox (#6137) --- .../unreleased/Fixes-20221016-173742.yaml | 8 +++ .../Under the Hood-20221017-151511.yaml | 7 ++ .../Under the Hood-20221017-155844.yaml | 7 ++ .changie.yaml | 2 +- core/dbt/adapters/base/impl.py | 2 +- core/dbt/clients/_jinja_blocks.py | 6 +- core/dbt/config/project.py | 4 +- core/dbt/context/base.py | 2 +- core/dbt/context/providers.py | 6 +- core/dbt/contracts/connection.py | 2 +- core/dbt/contracts/graph/manifest.py | 2 +- core/dbt/contracts/results.py | 2 +- core/dbt/events/adapter_endpoint.py | 1 + core/dbt/events/base_types.py | 17 +++++ core/dbt/events/types.py | 30 ++++---- core/dbt/exceptions.py | 4 +- core/dbt/graph/graph.py | 2 +- core/dbt/graph/selector.py | 2 +- core/dbt/logger.py | 4 +- core/dbt/parser/base.py | 2 +- core/dbt/parser/generic_test_builders.py | 2 +- core/dbt/parser/schemas.py | 2 +- core/dbt/parser/sources.py | 4 +- core/dbt/task/base.py | 2 +- core/dbt/task/compile.py | 4 +- core/dbt/task/freshness.py | 2 +- core/dbt/task/runnable.py | 4 +- core/dbt/utils.py | 4 +- .../complex_cycle_models/model_a.sql | 2 - .../complex_cycle_models/model_b.sql | 4 -- .../complex_cycle_models/model_c.sql | 2 - .../complex_cycle_models/model_d.sql | 2 - .../complex_cycle_models/model_e.sql | 2 - .../complex_cycle_models/readme | 7 -- .../simple_cycle_models/model_a.sql | 2 - .../simple_cycle_models/model_b.sql | 2 - .../027_cycle_tests/test_cycles.py | 37 ---------- .../models/schema.yml | 3 - .../test_all_comment_yml_files.py | 20 ------ .../functional/artifacts/expected_manifest.py | 2 +- tests/functional/cycles/test_cycles.py | 71 +++++++++++++++++++ tests/functional/schema_tests/fixtures.py | 4 ++ .../schema_tests/test_schema_v2_tests.py | 19 +++++ tests/unit/test_events.py | 9 +++ tox.ini | 2 + 45 files changed, 194 insertions(+), 132 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221016-173742.yaml create mode 100644 .changes/unreleased/Under the Hood-20221017-151511.yaml create mode 100644 .changes/unreleased/Under the Hood-20221017-155844.yaml delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_a.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_b.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_c.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_d.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_e.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/readme delete mode 100644 test/integration/027_cycle_tests/simple_cycle_models/model_a.sql delete mode 100644 test/integration/027_cycle_tests/simple_cycle_models/model_b.sql delete mode 100644 test/integration/027_cycle_tests/test_cycles.py delete mode 100644 test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml delete mode 100644 test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py create mode 100644 tests/functional/cycles/test_cycles.py diff --git a/.changes/unreleased/Fixes-20221016-173742.yaml b/.changes/unreleased/Fixes-20221016-173742.yaml new file mode 100644 index 00000000000..11d4a8c85f4 --- /dev/null +++ b/.changes/unreleased/Fixes-20221016-173742.yaml @@ -0,0 +1,8 @@ +kind: Fixes +body: Add functors to ensure event types with str-type attributes are initialized + to spec, even when provided non-str type 
params. +time: 2022-10-16T17:37:42.846683-07:00 +custom: + Author: versusfacit + Issue: "5436" + PR: "5874" diff --git a/.changes/unreleased/Under the Hood-20221017-151511.yaml b/.changes/unreleased/Under the Hood-20221017-151511.yaml new file mode 100644 index 00000000000..cbdcf04beb3 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221017-151511.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Fixed extra whitespace in strings introduced by black. +time: 2022-10-17T15:15:11.499246-05:00 +custom: + Author: luke-bassett + Issue: "1350" + PR: "6086" diff --git a/.changes/unreleased/Under the Hood-20221017-155844.yaml b/.changes/unreleased/Under the Hood-20221017-155844.yaml new file mode 100644 index 00000000000..84e6675351c --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221017-155844.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Clean up string formatting +time: 2022-10-17T15:58:44.676549-04:00 +custom: + Author: eve-johns + Issue: "6068" + PR: "6082" diff --git a/.changie.yaml b/.changie.yaml index 9d938a9c519..0744c5bb9c7 100644 --- a/.changie.yaml +++ b/.changie.yaml @@ -44,7 +44,7 @@ custom: footerFormat: | {{- $contributorDict := dict }} {{- /* any names added to this list should be all lowercase for later matching purposes */}} - {{- $core_team := list "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} + {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} {{- range $change := .Changes }} {{- $authorList := splitList " " $change.Custom.Author }} {{- /* loop through all authors for a PR */}} diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 3f8a1e6f78f..3c301c2e7f4 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -581,7 +581,7 @@ def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[ :rtype: List[self.Relation] """ raise NotImplementedException( - "`list_relations_without_caching` is not implemented for this " "adapter!" + "`list_relations_without_caching` is not implemented for this adapter!" ) ### diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index 761c6dfcb4d..c1ef31acf44 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -367,9 +367,9 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 dbt.exceptions.raise_compiler_error( - ( - "Reached EOF without finding a close tag for " "{} (searched from line {})" - ).format(self.current.block_type_name, linecount) + ("Reached EOF without finding a close tag for {} (searched from line {})").format( + self.current.block_type_name, linecount + ) ) if collect_raw_data: diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index d2aaee699a3..9521dd29882 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -248,7 +248,7 @@ class PartialProject(RenderComponents): project_name: Optional[str] = field( metadata=dict( description=( - "The name of the project. This should always be set and will not " "be rendered" + "The name of the project. 
This should always be set and will not be rendered" ) ) ) @@ -668,7 +668,7 @@ def hashed_name(self): def get_selector(self, name: str) -> Union[SelectionSpec, bool]: if name not in self.selectors: raise RuntimeException( - f"Could not find selector named {name}, expected one of " f"{list(self.selectors)}" + f"Could not find selector named {name}, expected one of {list(self.selectors)}" ) return self.selectors[name]["definition"] diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index bf334a7d11f..68b5edb98c1 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -126,7 +126,7 @@ def __new__(mcls, name, bases, dct): class Var: - UndefinedVarError = "Required var '{}' not found in config:\nVars " "supplied to {} = {}" + UndefinedVarError = "Required var '{}' not found in config:\nVars supplied to {} = {}" _VAR_NOTSET = object() def __init__( diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index c053d28d1df..597b526e384 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -182,7 +182,7 @@ def dispatch( return macro searched = ", ".join(repr(a) for a in attempts) - msg = f"In dispatch: No macro named '{macro_name}' found\n" f" Searched for: {searched}" + msg = f"In dispatch: No macro named '{macro_name}' found\n Searched for: {searched}" raise CompilationException(msg) @@ -220,12 +220,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]: def validate_args(self, name: str, package: Optional[str]): if not isinstance(name, str): raise CompilationException( - f"The name argument to ref() must be a string, got " f"{type(name)}" + f"The name argument to ref() must be a string, got {type(name)}" ) if package is not None and not isinstance(package, str): raise CompilationException( - f"The package argument to ref() must be a string or None, got " f"{type(package)}" + f"The package argument to ref() must be a string or None, got {type(package)}" ) def __call__(self, *args: str) -> RelationProxy: diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index 831230d661d..a32bb443099 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -94,7 +94,7 @@ def handle(self): self._handle.resolve(self) except RecursionError as exc: raise InternalException( - "A connection's open() method attempted to read the " "handle value" + "A connection's open() method attempted to read the handle value" ) from exc return self._handle diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 7e4c42fce76..a2d22e6e315 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -499,7 +499,7 @@ def _update_into(dest: MutableMapping[str, T], new_item: T): existing = dest[unique_id] if new_item.original_file_path != existing.original_file_path: raise dbt.exceptions.RuntimeException( - f"cannot update a {new_item.resource_type} to have a new file " f"path!" + f"cannot update a {new_item.resource_type} to have a new file path!" 
) dest[unique_id] = new_item diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index cb0a6e2a67e..a3b7ce2b506 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -339,7 +339,7 @@ def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput criteria = result.node.freshness if criteria is None: raise InternalException( - "Somehow evaluated a freshness result for a source " "that has no freshness criteria!" + "Somehow evaluated a freshness result for a source that has no freshness criteria!" ) return SourceFreshnessOutput( unique_id=unique_id, diff --git a/core/dbt/events/adapter_endpoint.py b/core/dbt/events/adapter_endpoint.py index aff157ab611..68a73d8aecb 100644 --- a/core/dbt/events/adapter_endpoint.py +++ b/core/dbt/events/adapter_endpoint.py @@ -9,6 +9,7 @@ ) +# N.B. No guarantees for what type param msg is. @dataclass class AdapterLogger: name: str diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index 489b70cb1ad..cd3275c02a9 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -99,6 +99,23 @@ def level_tag(self) -> str: return "error" +# Included to ensure classes with str-type message members are initialized correctly. +@dataclass # type: ignore[misc] +class AdapterEventStringFunctor: + def __post_init__(self): + super().__post_init__() + if not isinstance(self.base_msg, str): + self.base_msg = str(self.base_msg) + + +@dataclass # type: ignore[misc] +class EventStringFunctor: + def __post_init__(self): + super().__post_init__() + if not isinstance(self.msg, str): + self.msg = str(self.msg) + + # prevents an event from going to the file # This should rarely be used in core code. It is currently # only used in integration tests and for the 'clean' command. 
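Note on the two functor mixins added above: they rely on dataclass __post_init__ chaining through the MRO. The generated proto event class initializes its fields, then the functor's __post_init__ runs and coerces the message member to str. Below is a minimal, self-contained sketch of that mechanism; BaseProtoEvent and ExampleInfoEvent are illustrative stand-ins for the generated betterproto classes, not dbt's real types.

from dataclasses import dataclass


@dataclass
class BaseProtoEvent:
    # stand-in for a generated proto event class with a `msg` member
    msg: str

    def __post_init__(self):
        # the real generated classes may do their own post-init work here
        pass


@dataclass
class EventStringFunctor:
    # mirrors the mixin above: runs via the MRO and coerces whatever
    # was passed for `msg` into a string
    def __post_init__(self):
        super().__post_init__()
        if not isinstance(self.msg, str):
            self.msg = str(self.msg)


@dataclass
class ExampleInfoEvent(EventStringFunctor, BaseProtoEvent):
    # in dbt this position is taken by classes like MacroEventInfo,
    # which also mix in a level class such as InfoLevel
    pass


event = ExampleInfoEvent(msg=[1, 2, 3])
assert isinstance(event.msg, str)  # "[1, 2, 3]"

AdapterEventStringFunctor applies the same pattern to base_msg, which is why the new unit tests later in this series can pass a list to logger.debug and still read a str back.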
diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 60204138c36..f6e66f941d2 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -7,6 +7,8 @@ WarnLevel, ErrorLevel, Cache, + AdapterEventStringFunctor, + EventStringFunctor, ) from dbt.events.format import format_fancy_output_line, pluralize @@ -309,7 +311,7 @@ def message(self) -> str: @dataclass -class AdapterEventDebug(DebugLevel, pt.AdapterEventDebug): # noqa +class AdapterEventDebug(DebugLevel, AdapterEventStringFunctor, pt.AdapterEventDebug): # noqa def code(self): return "E001" @@ -318,7 +320,7 @@ def message(self): @dataclass -class AdapterEventInfo(InfoLevel, pt.AdapterEventInfo): # noqa +class AdapterEventInfo(InfoLevel, AdapterEventStringFunctor, pt.AdapterEventInfo): # noqa def code(self): return "E002" @@ -327,7 +329,7 @@ def message(self): @dataclass -class AdapterEventWarning(WarnLevel, pt.AdapterEventWarning): # noqa +class AdapterEventWarning(WarnLevel, AdapterEventStringFunctor, pt.AdapterEventWarning): # noqa def code(self): return "E003" @@ -336,7 +338,7 @@ def message(self): @dataclass -class AdapterEventError(ErrorLevel, pt.AdapterEventError): # noqa +class AdapterEventError(ErrorLevel, AdapterEventStringFunctor, pt.AdapterEventError): # noqa def code(self): return "E004" @@ -1218,7 +1220,9 @@ def message(self) -> str: # TODO: switch to storing structured info and calling get_target_failure_msg @dataclass -class InvalidDisabledSourceInTestNode(WarnLevel, pt.InvalidDisabledSourceInTestNode): +class InvalidDisabledSourceInTestNode( + WarnLevel, EventStringFunctor, pt.InvalidDisabledSourceInTestNode +): def code(self): return "I050" @@ -1227,7 +1231,7 @@ def message(self) -> str: @dataclass -class InvalidRefInTestNode(DebugLevel, pt.InvalidRefInTestNode): +class InvalidRefInTestNode(DebugLevel, EventStringFunctor, pt.InvalidRefInTestNode): def code(self): return "I051" @@ -1334,7 +1338,7 @@ def message(self) -> str: @dataclass -class MacroEventInfo(InfoLevel, pt.MacroEventInfo): +class MacroEventInfo(InfoLevel, EventStringFunctor, pt.MacroEventInfo): def code(self): return "M011" @@ -1343,7 +1347,7 @@ def message(self) -> str: @dataclass -class MacroEventDebug(DebugLevel, pt.MacroEventDebug): +class MacroEventDebug(DebugLevel, EventStringFunctor, pt.MacroEventDebug): def code(self): return "M012" @@ -2261,7 +2265,7 @@ def message(self) -> str: @dataclass -class RunResultError(ErrorLevel, pt.RunResultError): +class RunResultError(ErrorLevel, EventStringFunctor, pt.RunResultError): def code(self): return "Z024" @@ -2299,7 +2303,7 @@ def message(self) -> str: @dataclass -class FirstRunResultError(ErrorLevel, pt.FirstRunResultError): +class FirstRunResultError(ErrorLevel, EventStringFunctor, pt.FirstRunResultError): def code(self): return "Z028" @@ -2308,7 +2312,7 @@ def message(self) -> str: @dataclass -class AfterFirstRunResultError(ErrorLevel, pt.AfterFirstRunResultError): +class AfterFirstRunResultError(ErrorLevel, EventStringFunctor, pt.AfterFirstRunResultError): def code(self): return "Z029" @@ -2446,7 +2450,7 @@ def message(self) -> str: @dataclass -class GeneralWarningMsg(WarnLevel, pt.GeneralWarningMsg): +class GeneralWarningMsg(WarnLevel, EventStringFunctor, pt.GeneralWarningMsg): def code(self): return "Z046" @@ -2476,7 +2480,7 @@ def message(self) -> str: @dataclass -class RunResultWarningMessage(WarnLevel, pt.RunResultWarningMessage): +class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarningMessage): def code(self): return "Z049" diff --git 
a/core/dbt/exceptions.py b/core/dbt/exceptions.py index b9539ea19bd..db824e19bf1 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -976,9 +976,7 @@ def raise_patch_targets_not_found(patches): def _fix_dupe_msg(path_1: str, path_2: str, name: str, type_name: str) -> str: if path_1 == path_2: - return ( - f"remove one of the {type_name} entries for {name} in this file:\n" f" - {path_1!s}\n" - ) + return f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" else: return ( f"remove the {type_name} entry for {name} in one of these files:\n" diff --git a/core/dbt/graph/graph.py b/core/dbt/graph/graph.py index acfc43c2142..2dda596e073 100644 --- a/core/dbt/graph/graph.py +++ b/core/dbt/graph/graph.py @@ -90,7 +90,7 @@ def get_subset_graph(self, selected: Iterable[UniqueId]) -> "Graph": for node in include_nodes: if node not in new_graph: raise ValueError( - "Couldn't find model '{}' -- does it exist or is " "it disabled?".format(node) + "Couldn't find model '{}' -- does it exist or is it disabled?".format(node) ) return Graph(new_graph) diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 3cb5f415be9..49b73fc71c4 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -26,7 +26,7 @@ def get_package_names(nodes): def alert_non_existence(raw_spec, nodes): if len(nodes) == 0: - warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match" f" any nodes") + warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match any nodes") def can_select_indirectly(node): diff --git a/core/dbt/logger.py b/core/dbt/logger.py index 3787b9a769b..4bbcfca4c06 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -28,9 +28,7 @@ colorama.init(wrap=True) STDOUT_LOG_FORMAT = "{record.message}" -DEBUG_LOG_FORMAT = ( - "{record.time:%Y-%m-%d %H:%M:%S.%f%z} " "({record.thread_name}): " "{record.message}" -) +DEBUG_LOG_FORMAT = "{record.time:%Y-%m-%d %H:%M:%S.%f%z} ({record.thread_name}): {record.message}" def get_secret_env() -> List[str]: diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 2786a7c5744..4b9e666a421 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -347,7 +347,7 @@ def initial_config(self, fqn: List[str]) -> ContextConfig: ) else: raise InternalException( - f"Got an unexpected project version={config_version}, " f"expected 2" + f"Got an unexpected project version={config_version}, expected 2" ) def config_dict( diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index a0617f1689f..3dfb541cb8f 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -435,7 +435,7 @@ def tags(self) -> List[str]: tags = [tags] if not isinstance(tags, list): raise_compiler_error( - f"got {tags} ({type(tags)}) for tags, expected a list of " f"strings" + f"got {tags} ({type(tags)}) for tags, expected a list of strings" ) for tag in tags: if not isinstance(tag, str): diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index b73722952ce..8b22427cb39 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -298,7 +298,7 @@ def _parse_generic_test( except ParsingException as exc: context = _trimmed(str(target)) - msg = "Invalid test config given in {}:" "\n\t{}\n\t@: {}".format( + msg = "Invalid test config given in {}:\n\t{}\n\t@: {}".format( target.original_file_path, exc.msg, context ) raise ParsingException(msg) from exc diff --git 
a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 4757edab31e..1c55281db56 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -150,7 +150,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit if not isinstance(config, SourceConfig): raise InternalException( - f"Calculated a {type(config)} for a source, but expected " f"a SourceConfig" + f"Calculated a {type(config)} for a source, but expected a SourceConfig" ) default_database = self.root_project.credentials.database @@ -317,7 +317,7 @@ def get_unused_msg( unused_tables: Dict[SourceKey, Optional[Set[str]]], ) -> str: msg = [ - "During parsing, dbt encountered source overrides that had no " "target:", + "During parsing, dbt encountered source overrides that had no target:", ] for key, table_names in unused_tables.items(): patch = self.manifest.source_patches[key] diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index b20dd76b10d..1b067d79af8 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -461,7 +461,7 @@ def on_skip(self): print_run_result_error(result=self.skip_cause, newline=False) if self.skip_cause is None: # mypy appeasement raise InternalException( - "Skip cause not set but skip was somehow caused by " "an ephemeral failure" + "Skip cause not set but skip was somehow caused by an ephemeral failure" ) # set an error so dbt will exit with an error code error_message = ( diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py index b091ae76099..740d35d37e9 100644 --- a/core/dbt/task/compile.py +++ b/core/dbt/task/compile.py @@ -64,7 +64,7 @@ def _get_deferred_manifest(self) -> Optional[WritableManifest]: state = self.previous_state if state is None: raise RuntimeException( - "Received a --defer argument, but no value was provided " "to --state" + "Received a --defer argument, but no value was provided to --state" ) if state.manifest is None: @@ -77,7 +77,7 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]): return if self.manifest is None: raise InternalException( - "Expected to defer to manifest, but there is no runtime " "manifest to defer from!" + "Expected to defer to manifest, but there is no runtime manifest to defer from!" ) self.manifest.merge_from_artifact( adapter=adapter, diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index fa16bc5dd80..ab256334271 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -135,7 +135,7 @@ def execute(self, compiled_node, manifest): # broken, raise! if compiled_node.loaded_at_field is None: raise InternalException( - "Got to execute for source freshness of a source that has no " "loaded_at_field!" + "Got to execute for source freshness of a source that has no loaded_at_field!" ) relation = self.adapter.Relation.create_from_source(compiled_node) diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 39289b3cacb..af0de610c98 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -174,7 +174,7 @@ def _runtime_initialize(self): self._flattened_nodes.append(self.manifest.sources[uid]) else: raise InternalException( - f"Node selection returned {uid}, expected a node or a " f"source" + f"Node selection returned {uid}, expected a node or a source" ) self.num_nodes = len([n for n in self._flattened_nodes if not n.is_ephemeral_model]) @@ -459,7 +459,7 @@ def run(self): if len(self._flattened_nodes) == 0: with TextOnly(): fire_event(EmptyLine()) - msg = "Nothing to do. 
Try checking your model " "configs and model specification args" + msg = "Nothing to do. Try checking your model configs and model specification args" warn_or_error(msg, log_fmt=warning_tag("{}")) result = self.get_result( results=[], diff --git a/core/dbt/utils.py b/core/dbt/utils.py index ccae3601446..b7cc6475319 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -491,11 +491,11 @@ def submit(*args, **kwargs): self, fn, *args = args elif not args: raise TypeError( - "descriptor 'submit' of 'SingleThreadedExecutor' object needs " "an argument" + "descriptor 'submit' of 'SingleThreadedExecutor' object needs an argument" ) else: raise TypeError( - "submit expected at least 1 positional argument, " "got %d" % (len(args) - 1) + "submit expected at least 1 positional argument, got %d" % (len(args) - 1) ) fut = concurrent.futures.Future() try: diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_a.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_a.sql deleted file mode 100644 index 2cd691ea7b4..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_a.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select 1 as id diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_b.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_b.sql deleted file mode 100644 index da16daedfdb..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_b.sql +++ /dev/null @@ -1,4 +0,0 @@ - -select * from {{ ref('model_a') }} -union all -select * from {{ ref('model_e') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_c.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_c.sql deleted file mode 100644 index 741b6cce028..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_c.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_b') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_d.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_d.sql deleted file mode 100644 index 954ca668936..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_d.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_c') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_e.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_e.sql deleted file mode 100644 index 9f689ae55df..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_e.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_e') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/readme b/test/integration/027_cycle_tests/complex_cycle_models/readme deleted file mode 100644 index 5f95aba0473..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/readme +++ /dev/null @@ -1,7 +0,0 @@ - -The cycle in this graph looks like: - -A -> B -> C -> D - ^ | - | | - +--- E <--+ diff --git a/test/integration/027_cycle_tests/simple_cycle_models/model_a.sql b/test/integration/027_cycle_tests/simple_cycle_models/model_a.sql deleted file mode 100644 index 741b6cce028..00000000000 --- a/test/integration/027_cycle_tests/simple_cycle_models/model_a.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_b') }} diff --git a/test/integration/027_cycle_tests/simple_cycle_models/model_b.sql b/test/integration/027_cycle_tests/simple_cycle_models/model_b.sql deleted file mode 100644 index 67176d4b2b4..00000000000 --- 
a/test/integration/027_cycle_tests/simple_cycle_models/model_b.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_a') }} diff --git a/test/integration/027_cycle_tests/test_cycles.py b/test/integration/027_cycle_tests/test_cycles.py deleted file mode 100644 index 9312a76ac84..00000000000 --- a/test/integration/027_cycle_tests/test_cycles.py +++ /dev/null @@ -1,37 +0,0 @@ -from freezegun import freeze_time -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestSimpleCycle(DBTIntegrationTest): - - @property - def schema(self): - return "cycles_simple_027" - - @property - def models(self): - return "simple_cycle_models" - - @property - @use_profile('postgres') - def test_postgres_simple_cycle(self): - message = "Found a cycle.*" - with self.assertRaisesRegex(Exception, message): - self.run_dbt(["run"]) - -class TestComplexCycle(DBTIntegrationTest): - - @property - def schema(self): - return "cycles_complex_027" - - @property - def models(self): - return "complex_cycle_models" - - @property - @use_profile('postgres') - def test_postgres_simple_cycle(self): - message = "Found a cycle.*" - with self.assertRaisesRegex(Exception, message): - self.run_dbt(["run"]) diff --git a/test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml b/test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml deleted file mode 100644 index 35ab0fade65..00000000000 --- a/test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml +++ /dev/null @@ -1,3 +0,0 @@ -# models/schema.yml -# only comments here -# https://github.com/dbt-labs/dbt-core/issues/3568 \ No newline at end of file diff --git a/test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py b/test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py deleted file mode 100644 index c9f6b2d99f1..00000000000 --- a/test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py +++ /dev/null @@ -1,20 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestAllCommentYMLIsOk(DBTIntegrationTest): - @property - def schema(self): - return "071_commented_yaml" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_parses_with_all_comment_yml(self): - try: - self.run_dbt(['parse']) - except TypeError: - assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568' - except: - assert False, '`dbt parse` failed with a yaml file that is all comments' diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 23e396400e9..32c9dcfbfa1 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -1275,7 +1275,7 @@ def expected_references_manifest(project): }, "test.view_summary": { "block_contents": ( - "A view of the summary of the ephemeral copy of the " "seed data" + "A view of the summary of the ephemeral copy of the seed data" ), "name": "view_summary", "original_file_path": docs_path, diff --git a/tests/functional/cycles/test_cycles.py b/tests/functional/cycles/test_cycles.py new file mode 100644 index 00000000000..0e2cdcaf911 --- /dev/null +++ b/tests/functional/cycles/test_cycles.py @@ -0,0 +1,71 @@ +import pytest + +from dbt.tests.util import run_dbt + +model_a_sql = """ +select * from {{ ref('model_b') }} +""" + +model_b_sql = """ +select * from {{ 
ref('model_a') }} +""" + +complex_cycle__model_a_sql = """ +select 1 as id +""" + +complex_cycle__model_b_sql = """ +select * from {{ ref('model_a') }}s +union all +select * from {{ ref('model_e') }} +""" + +complex_cycle__model_c_sql = """ +select * from {{ ref('model_b') }} +""" + +complex_cycle__model_d_sql = """ +select * from {{ ref('model_c') }} +""" + +complex_cycle__model_e_sql = """ +select * from {{ ref('model_e') }} +""" + + +class TestSimpleCycle: + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": model_a_sql, + "model_b.sql": model_b_sql + } + + def test_simple_cycle(self, project): + with pytest.raises(RuntimeError) as exc: + run_dbt(["run"]) + expected_msg = "Found a cycle" + assert expected_msg in str(exc.value) + + +class TestComplexCycle: + @pytest.fixture(scope="class") + def models(self): + # The cycle in this graph looks like: + # A -> B -> C -> D + # ^ | + # | | + # +--- E <--+ + return { + "model_a.sql": complex_cycle__model_a_sql, + "model_b.sql": complex_cycle__model_b_sql, + "model_c.sql": complex_cycle__model_c_sql, + "model_d.sql": complex_cycle__model_d_sql, + "model_e.sql": complex_cycle__model_e_sql, + } + + def test_complex_cycle(self, project): + with pytest.raises(RuntimeError) as exc: + run_dbt(["run"]) + expected_msg = "Found a cycle" + assert expected_msg in str(exc.value) diff --git a/tests/functional/schema_tests/fixtures.py b/tests/functional/schema_tests/fixtures.py index 40fe9f5a086..7e0dfbaca58 100644 --- a/tests/functional/schema_tests/fixtures.py +++ b/tests/functional/schema_tests/fixtures.py @@ -536,6 +536,10 @@ """ +all_quotes_schema__schema_yml = """# models/schema.yml +# only comments here, which should be okay! +# https://github.com/dbt-labs/dbt-core/issues/3568""" + models_v2__render_test_cli_arg_models__schema_yml = """ version: 2 diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 07c3b87e63e..00c14cd711b 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -93,6 +93,7 @@ macro_resolution_order_models__config_yml, macro_resolution_order_models__my_model_sql, alt_local_utils__macros__type_timestamp_sql, + all_quotes_schema__schema_yml, ) from dbt.exceptions import ParsingException, CompilationException from dbt.contracts.results import TestStatus @@ -991,6 +992,24 @@ def test_invalid_schema_file( assert re.search(r"'models' is not a list", str(exc)) +class TestCommentedSchema: + @pytest.fixture(scope="class") + def models(self): + return { + "schema.yml": all_quotes_schema__schema_yml, + "model.sql": invalid_schema_models__model_sql, + } + + def test_quoted_schema_file(self, project): + try: + # A schema file consisting entirely of quotes should not be a problem + run_dbt(['parse']) + except TypeError: + assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568' + except Exception: + assert False, '`dbt parse` failed with a yaml file that is all comments' + + class TestWrongSpecificationBlock: @pytest.fixture(scope="class") def models(self): diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 4f403d04473..c2064b84c1a 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -75,6 +75,15 @@ def test_formatting(self): event = AdapterEventDebug(name="dbt_tests", base_msg="boop{x}boop", args=()) assert "boop{x}boop" in event.message() + # ensure AdapterLogger and subclasses makes 
all base_msg members
+        # of type string, even when someone writes logger.debug(a) where a is
+        # any non-string object
+        event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,))
+        assert isinstance(event.base_msg, str)
+
+        event = MacroEventDebug(msg=[1,2,3])
+        assert isinstance(event.msg, str)
+
 
 class TestEventCodes:
 
diff --git a/tox.ini b/tox.ini
index 89f2ac41204..109e8b4f62f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,6 +4,7 @@ envlist = unit,integration
 
 [testenv:{unit,py37,py38,py39,py310,py}]
 description = unit testing
+download = true
 skip_install = true
 passenv = DBT_* PYTEST_ADDOPTS
 commands =
@@ -15,6 +16,7 @@ deps =
 
 [testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py-integration}]
 description = adapter plugin integration testing
+download = true
 skip_install = true
 passenv = DBT_* POSTGRES_TEST_* PYTEST_ADDOPTS
 commands =

From 1ec54abdc4933918ca09faf3ff9041050f844e48 Mon Sep 17 00:00:00 2001
From: Ian Knox <81931810+iknox-fa@users.noreply.github.com>
Date: Tue, 25 Oct 2022 14:14:08 -0500
Subject: [PATCH 03/54] Logging works with Click (#6088)

---
 .../Under the Hood-20221017-170500.yaml       |   7 ++
 core/dbt/cli/main.py                          |  17 +--
 core/dbt/cli/params.py                        |   3 +-
 core/dbt/clients/system.py                    |  33 ++++--
 core/dbt/events/functions.py                  | 109 +++++++++---------
 core/dbt/main.py                              |   4 +-
 core/dbt/tests/fixtures/project.py            |   2 +-
 test/integration/base.py                      |   2 +-
 test/unit/test_system_client.py               |  11 ++
 9 files changed, 112 insertions(+), 76 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20221017-170500.yaml

diff --git a/.changes/unreleased/Under the Hood-20221017-170500.yaml b/.changes/unreleased/Under the Hood-20221017-170500.yaml
new file mode 100644
index 00000000000..8f5e20cd8ff
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20221017-170500.yaml
@@ -0,0 +1,7 @@
+kind: Under the Hood
+body: Click CLI supports logging
+time: 2022-10-17T17:05:00.478948-05:00
+custom:
+  Author: iknox-fa
+  Issue: "5530"
+  PR: "6088"
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 739ada4a841..22e9f648505 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -6,6 +6,7 @@
 from dbt.adapters.factory import adapter_management
 from dbt.cli import params as p
 from dbt.cli.flags import Flags
+from dbt.events.functions import setup_event_logger
 from dbt.profiler import profiler
 from dbt.tracking import initialize_from_flags, track_run
 
@@ -36,6 +37,7 @@ def cli_runner():
 @p.fail_fast
 @p.log_cache_events
 @p.log_format
+@p.log_path
 @p.macro_debugging
 @p.partial_parse
 @p.print
@@ -55,6 +57,14 @@ def cli(ctx, **kwargs):
     """
     flags = Flags()
 
+    # Logging
+    # N.B.
Legacy logger is not supported + setup_event_logger( + flags.LOG_PATH, + flags.LOG_FORMAT, + flags.USE_COLORS, + flags.DEBUG, + ) # Tracking initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR) ctx.with_resource(track_run(run_command=ctx.invoked_subcommand)) @@ -82,7 +92,6 @@ def cli(ctx, **kwargs): @p.fail_fast @p.full_refresh @p.indirect_selection -@p.log_path @p.models @p.profile @p.profiles_dir @@ -129,7 +138,6 @@ def docs(ctx, **kwargs): @p.compile_docs @p.defer @p.exclude -@p.log_path @p.models @p.profile @p.profiles_dir @@ -169,7 +177,6 @@ def docs_serve(ctx, **kwargs): @p.defer @p.exclude @p.full_refresh -@p.log_path @p.models @p.parse_only @p.profile @@ -259,7 +266,6 @@ def list(ctx, **kwargs): @cli.command("parse") @click.pass_context @p.compile_parse -@p.log_path @p.profile @p.profiles_dir @p.project_dir @@ -282,7 +288,6 @@ def parse(ctx, **kwargs): @p.exclude @p.fail_fast @p.full_refresh -@p.log_path @p.models @p.profile @p.profiles_dir @@ -320,7 +325,6 @@ def run_operation(ctx, **kwargs): @click.pass_context @p.exclude @p.full_refresh -@p.log_path @p.models @p.profile @p.profiles_dir @@ -393,7 +397,6 @@ def freshness(ctx, **kwargs): @p.exclude @p.fail_fast @p.indirect_selection -@p.log_path @p.models @p.profile @p.profiles_dir diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 1661e6e8c55..d092d7eae51 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -131,7 +131,8 @@ "--log-path", envvar="DBT_LOG_PATH", help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.", - type=click.Path(), + default=Path.cwd() / "logs", + type=click.Path(resolve_path=True, path_type=Path), ) macro_debugging = click.option( diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index b1cd1b5a074..d1b1c461f50 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -1,30 +1,32 @@ import errno -import functools import fnmatch +import functools import json import os import os.path import re import shutil +import stat import subprocess import sys import tarfile -import requests -import stat -from typing import Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union -from pathspec import PathSpec # type: ignore +from pathlib import Path +from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union +import dbt.exceptions +import requests from dbt.events.functions import fire_event from dbt.events.types import ( - SystemErrorRetrievingModTime, SystemCouldNotWrite, + SystemErrorRetrievingModTime, SystemExecutingCmd, - SystemStdOutMsg, - SystemStdErrMsg, SystemReportReturnCode, + SystemStdErrMsg, + SystemStdOutMsg, ) -import dbt.exceptions +from dbt.exceptions import InternalException from dbt.utils import _connection_exception_retry as connection_exception_retry +from pathspec import PathSpec # type: ignore if sys.platform == "win32": from ctypes import WinDLL, c_bool @@ -106,12 +108,18 @@ def load_file_contents(path: str, strip: bool = True) -> str: return to_return -def make_directory(path: str) -> None: +@functools.singledispatch +def make_directory(path=None) -> None: """ Make a directory and any intermediate directories that don't already exist. This function handles the case where two threads try to create a directory at once. 
""" + raise InternalException(f"Can not create directory from {type(path)} ") + + +@make_directory.register +def _(path: str) -> None: path = convert_path(path) if not os.path.exists(path): # concurrent writes that try to create the same dir can fail @@ -125,6 +133,11 @@ def make_directory(path: str) -> None: raise e +@make_directory.register +def _(path: Path) -> None: + path.mkdir(parents=True, exist_ok=True) + + def make_file(path: str, contents: str = "", overwrite: bool = False) -> bool: """ Make a file at `path` assuming that the directory it resides in already diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 122171bc8bf..7a652a998f6 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -1,63 +1,55 @@ import betterproto -from colorama import Style -from dbt.events.base_types import NoStdOut, BaseEvent, NoFile, Cache -from dbt.events.types import EventBufferFull, MainReportVersion, EmptyLine -import dbt.flags as flags -from dbt.constants import SECRET_ENV_PREFIX, METADATA_ENV_PREFIX - -from dbt.logger import make_log_dir_if_missing, GLOBAL_LOGGER -from datetime import datetime -import json import io -from io import StringIO, TextIOWrapper -import logbook +import json import logging -from logging import Logger -import sys -from logging.handlers import RotatingFileHandler import os -import uuid +import sys import threading -from typing import List, Optional, Union, Callable, Dict +import uuid from collections import deque +from datetime import datetime +from io import StringIO, TextIOWrapper +from logging import Logger +from logging.handlers import RotatingFileHandler +from typing import Callable, Dict, List, Optional, Union + +import dbt.flags as flags +import logbook +from colorama import Style +from dbt.constants import METADATA_ENV_PREFIX, SECRET_ENV_PREFIX +from dbt.events.base_types import BaseEvent, Cache, NoFile, NoStdOut +from dbt.events.types import EmptyLine, EventBufferFull, MainReportVersion +from dbt.logger import make_log_dir_if_missing -LOG_VERSION = 3 +# create the module-globals +LOG_VERSION = 2 EVENT_HISTORY = None -# create the global file logger with no configuration -FILE_LOG = logging.getLogger("default_file") -null_handler = logging.NullHandler() -FILE_LOG.addHandler(null_handler) - -# set up logger to go to stdout with defaults -# setup_event_logger will be called once args have been parsed -STDOUT_LOG = logging.getLogger("default_stdout") -STDOUT_LOG.setLevel(logging.INFO) -stdout_handler = logging.StreamHandler(sys.stdout) -stdout_handler.setLevel(logging.INFO) -STDOUT_LOG.addHandler(stdout_handler) - -format_color = True -format_json = False +DEFAULT_FILE_LOGGER_NAME = "default_file" +FILE_LOG = logging.getLogger(DEFAULT_FILE_LOGGER_NAME) + +DEFAULT_STDOUT_LOGGER_NAME = "default_std_out" +STDOUT_LOG = logging.getLogger(DEFAULT_STDOUT_LOGGER_NAME) + invocation_id: Optional[str] = None metadata_vars: Optional[Dict[str, str]] = None -def setup_event_logger(log_path, level_override=None): - global format_json, format_color, STDOUT_LOG, FILE_LOG +def setup_event_logger(log_path, log_format, use_colors, debug): + global FILE_LOG + global STDOUT_LOG + make_log_dir_if_missing(log_path) - format_json = flags.LOG_FORMAT == "json" - # USE_COLORS can be None if the app just started and the cli flags - # havent been applied yet - format_color = True if flags.USE_COLORS else False # TODO this default should live somewhere better log_dest = os.path.join(log_path, "dbt.log") - level = level_override or (logging.DEBUG if 
flags.DEBUG else logging.INFO) + level = logging.DEBUG if debug else logging.INFO # overwrite the STDOUT_LOG logger with the configured one STDOUT_LOG = logging.getLogger("configured_std_out") STDOUT_LOG.setLevel(level) + setattr(STDOUT_LOG, "format_json", log_format == "json") + setattr(STDOUT_LOG, "format_color", True if use_colors else False) FORMAT = "%(message)s" stdout_passthrough_formatter = logging.Formatter(fmt=FORMAT) @@ -76,6 +68,8 @@ def setup_event_logger(log_path, level_override=None): # overwrite the FILE_LOG logger with the configured one FILE_LOG = logging.getLogger("configured_file") FILE_LOG.setLevel(logging.DEBUG) # always debug regardless of user input + setattr(FILE_LOG, "format_json", log_format == "json") + setattr(FILE_LOG, "format_color", True if use_colors else False) file_passthrough_formatter = logging.Formatter(fmt=FORMAT) @@ -93,7 +87,7 @@ def capture_stdout_logs() -> StringIO: global STDOUT_LOG capture_buf = io.StringIO() stdout_capture_handler = logging.StreamHandler(capture_buf) - stdout_handler.setLevel(logging.DEBUG) + stdout_capture_handler.setLevel(logging.DEBUG) STDOUT_LOG.addHandler(stdout_capture_handler) return capture_buf @@ -146,8 +140,7 @@ def event_to_dict(event: BaseEvent) -> dict: # translates an Event to a completely formatted text-based log line # type hinting everything as strings so we don't get any unintentional string conversions via str() def reset_color() -> str: - global format_color - return "" if not format_color else Style.RESET_ALL + return Style.RESET_ALL if getattr(STDOUT_LOG, "format_color", False) else "" def create_info_text_log_line(e: BaseEvent) -> str: @@ -189,8 +182,27 @@ def create_json_log_line(e: BaseEvent) -> Optional[str]: # calls create_stdout_text_log_line() or create_json_log_line() according to logger config def create_log_line(e: BaseEvent, file_output=False) -> Optional[str]: - global format_json - if format_json: + global FILE_LOG + global STDOUT_LOG + + if FILE_LOG.name == DEFAULT_FILE_LOGGER_NAME and STDOUT_LOG.name == DEFAULT_STDOUT_LOGGER_NAME: + + # TODO: This is only necessary because our test framework doesn't correctly set up logging. 
+ # This code should be moved to the test framework when we do CT-XXX (tix # needed) + null_handler = logging.NullHandler() + FILE_LOG.addHandler(null_handler) + setattr(FILE_LOG, "format_json", False) + setattr(FILE_LOG, "format_color", False) + + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setLevel(logging.INFO) + STDOUT_LOG.setLevel(logging.INFO) + STDOUT_LOG.addHandler(stdout_handler) + setattr(STDOUT_LOG, "format_json", False) + setattr(STDOUT_LOG, "format_color", False) + + logger = FILE_LOG if file_output else STDOUT_LOG + if getattr(logger, "format_json"): return create_json_log_line(e) # json output, both console and file elif file_output is True or flags.DEBUG: return create_debug_text_log_line(e) # default file output @@ -238,15 +250,6 @@ def fire_event(e: BaseEvent) -> None: add_to_event_history(e) - # backwards compatibility for plugins that require old logger (dbt-rpc) - if flags.ENABLE_LEGACY_LOGGER: - # using Event::message because the legacy logger didn't differentiate messages by - # destination - log_line = create_log_line(e) - if log_line: - send_to_logger(GLOBAL_LOGGER, e.level_tag(), log_line) - return # exit the function to avoid using the current logger as well - # always logs debug level regardless of user input if not isinstance(e, NoFile): log_line = create_log_line(e, file_output=True) diff --git a/core/dbt/main.py b/core/dbt/main.py index f1627555d7a..24b053d0997 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -211,9 +211,7 @@ def run_from_args(parsed): if task.config is not None: log_path = getattr(task.config, "log_path", None) log_manager.set_path(log_path) - # if 'list' task: set stdout to WARN instead of INFO - level_override = parsed.cls.pre_init_hook(parsed) - setup_event_logger(log_path or "logs", level_override) + setup_event_logger(log_path or "logs", "json", False, True) fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION)) fire_event(MainReportArgs(args=args_to_dict(parsed))) diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index 5da885edf9b..fe97176cfb6 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -456,7 +456,7 @@ def project( # Logbook warnings are ignored so we don't have to fork logbook to support python 3.10. # This _only_ works for tests in `tests/` that use the project fixture. 
warnings.filterwarnings("ignore", category=DeprecationWarning, module="logbook") - setup_event_logger(logs_dir) + setup_event_logger(logs_dir, "json", False, False) orig_cwd = os.getcwd() os.chdir(project_root) # Return whatever is needed later in tests but can only come from fixtures, so we can keep diff --git a/test/integration/base.py b/test/integration/base.py index b2e55159d6b..9726ff7d482 100644 --- a/test/integration/base.py +++ b/test/integration/base.py @@ -313,7 +313,7 @@ def setUp(self): os.chdir(self.initial_dir) # before we go anywhere, collect the initial path info self._logs_dir = os.path.join(self.initial_dir, 'logs', self.prefix) - setup_event_logger(self._logs_dir) + setup_event_logger(self._logs_dir, None, False, True) _really_makedirs(self._logs_dir) self.test_original_source_path = _pytest_get_test_root() self.test_root_dir = self._generate_test_root_dir() diff --git a/test/unit/test_system_client.py b/test/unit/test_system_client.py index 9bf239c0650..63316b26852 100644 --- a/test/unit/test_system_client.py +++ b/test/unit/test_system_client.py @@ -53,6 +53,17 @@ def test__make_file_with_overwrite(self): self.assertTrue(written) self.assertEqual(self.get_profile_text(), 'NEW_TEXT') + def test__make_dir_from_str(self): + test_dir_str = self.tmp_dir + "/test_make_from_str/sub_dir" + dbt.clients.system.make_directory(test_dir_str) + self.assertTrue(Path(test_dir_str).is_dir()) + + def test__make_dir_from_pathobj(self): + test_dir_pathobj = Path(self.tmp_dir + "/test_make_from_pathobj/sub_dir") + dbt.clients.system.make_directory(test_dir_pathobj) + self.assertTrue(test_dir_pathobj.is_dir()) + + class TestRunCmd(unittest.TestCase): """Test `run_cmd`. From a0ec0b6f9db74701ee701f2415ad47c8b91300ba Mon Sep 17 00:00:00 2001 From: Chenyu Li Date: Tue, 15 Nov 2022 15:54:05 -0800 Subject: [PATCH 04/54] Project working with Click (#6142) Co-authored-by: MichelleArk Co-authored-by: Emily Rockman --- .github/workflows/main.yml | 12 ++++---- .pre-commit-config.yaml | 2 +- core/dbt/adapters/base/plugin.py | 4 +-- core/dbt/cli/main.py | 8 ++++- core/dbt/cli/option_types.py | 8 +++-- core/dbt/config/__init__.py | 2 +- core/dbt/config/profile.py | 3 +- core/dbt/config/project.py | 13 ++------ core/dbt/config/renderer.py | 2 +- core/dbt/config/runtime.py | 51 ++++++++++++++++++++++---------- core/dbt/context/configured.py | 3 +- core/dbt/context/target.py | 13 ++------ core/dbt/task/debug.py | 4 +-- test/unit/utils.py | 2 -- 14 files changed, 72 insertions(+), 55 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 00339110483..4de07d83c07 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -45,7 +45,9 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4.3.0 + with: + python-version: '3.8' - name: Install python dependencies run: | @@ -82,7 +84,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4.3.0 with: python-version: ${{ matrix.python-version }} @@ -137,7 +139,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4.3.0 with: python-version: ${{ matrix.python-version }} @@ -190,9 +192,9 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4.3.0 with: - python-version: 3.8 + python-version: '3.8' - name: 
Install python dependencies
        run: |
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a0290fdf762..6877497ae37 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,7 +30,7 @@ repos:
     args:
     - "--check"
     - "--diff"
-- repo: https://gitlab.com/pycqa/flake8
+- repo: https://github.com/pycqa/flake8
   rev: 4.0.1
   hooks:
   - id: flake8
diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py
index f0d348d8f57..f841ac772c2 100644
--- a/core/dbt/adapters/base/plugin.py
+++ b/core/dbt/adapters/base/plugin.py
@@ -7,9 +7,9 @@ def project_name_from_path(include_path: str) -> str:
     # avoid an import cycle
-    from dbt.config.project import Project
+    from dbt.config.project import PartialProject
 
-    partial = Project.partial_load(include_path)
+    partial = PartialProject.from_project_root(include_path)
     if partial.project_name is None:
         raise CompilationException(f"Invalid project at {include_path}: name not set!")
     return partial.project_name
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 22e9f648505..09753a4d4cb 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -9,6 +9,7 @@
 from dbt.events.functions import setup_event_logger
 from dbt.profiler import profiler
 from dbt.tracking import initialize_from_flags, track_run
+from dbt.config.runtime import load_project
 
 
 def cli_runner():
@@ -55,8 +56,8 @@ def cli(ctx, **kwargs):
     """An ELT tool for managing your SQL transformations and data models.
     For more documentation on these commands, visit: docs.getdbt.com
     """
+    ctx.obj = {}
     flags = Flags()
-
     # Logging
     # N.B. Legacy logger is not supported
     setup_event_logger(
@@ -73,6 +74,11 @@ def cli(ctx, **kwargs):
     if flags.RECORD_TIMING_INFO:
         ctx.with_resource(profiler(enable=True, outfile=flags.RECORD_TIMING_INFO))
 
+    # TODO need profile to exist
+    profile = None
+
+    # project needs the profile to render because it requires knowing the target
+    ctx.obj["project"] = load_project(flags.PROJECT_DIR, flags.VERSION_CHECK, profile, flags.VARS)
     # Adapter management
     ctx.with_resource(adapter_management())
diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py
index 523df651775..1df8bef1f7a 100644
--- a/core/dbt/cli/option_types.py
+++ b/core/dbt/cli/option_types.py
@@ -1,5 +1,7 @@
 from click import ParamType
-import yaml
+
+from dbt.config.utils import parse_cli_vars
+from dbt.exceptions import ValidationException
 
 
 class YAML(ParamType):
@@ -12,8 +14,8 @@ def convert(self, value, param, ctx):
         if not isinstance(value, str):
             self.fail(f"Cannot load YAML from type {type(value)}", param, ctx)
         try:
-            return yaml.load(value, Loader=yaml.Loader)
-        except yaml.parser.ParserError:
+            return parse_cli_vars(value)
+        except ValidationException:
             self.fail(f"String '{value}' is not valid YAML", param, ctx)
diff --git a/core/dbt/config/__init__.py b/core/dbt/config/__init__.py
index d61af81628e..5988ba589c3 100644
--- a/core/dbt/config/__init__.py
+++ b/core/dbt/config/__init__.py
@@ -1,4 +1,4 @@
 # all these are just exports, they need "noqa" so flake8 will not complain. 
from .profile import Profile, read_user_config # noqa -from .project import Project, IsFQNResource # noqa +from .project import Project, IsFQNResource, PartialProject # noqa from .runtime import RuntimeConfig, UnsetProfileConfig # noqa diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index 39679baa109..d8408abd0e4 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -421,8 +421,9 @@ def render_from_args( """ threads_override = getattr(args, "threads", None) target_override = getattr(args, "target", None) + profile_name_override = getattr(args, "profile", None) raw_profiles = read_profile(flags.PROFILES_DIR) - profile_name = cls.pick_profile_name(getattr(args, "profile", None), project_profile_name) + profile_name = cls.pick_profile_name(profile_name_override, project_profile_name) return cls.from_raw_profiles( raw_profiles=raw_profiles, profile_name=profile_name, diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 9521dd29882..2e60e9b7d5d 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -156,7 +156,7 @@ def value_or(value: Optional[T], default: T) -> T: return value -def _raw_project_from(project_root: str) -> Dict[str, Any]: +def load_raw_project(project_root: str) -> Dict[str, Any]: project_root = os.path.normpath(project_root) project_yaml_filepath = os.path.join(project_root, "dbt_project.yml") @@ -485,7 +485,7 @@ def from_project_root( cls, project_root: str, *, verify_version: bool = False ) -> "PartialProject": project_root = os.path.normpath(project_root) - project_dict = _raw_project_from(project_root) + project_dict = load_raw_project(project_root) config_version = project_dict.get("config-version", 1) if config_version != 2: raise DbtProjectError( @@ -644,13 +644,6 @@ def validate(self): except ValidationError as e: raise DbtProjectError(validator_error_message(e)) from e - @classmethod - def partial_load(cls, project_root: str, *, verify_version: bool = False) -> PartialProject: - return PartialProject.from_project_root( - project_root, - verify_version=verify_version, - ) - @classmethod def from_project_root( cls, @@ -659,7 +652,7 @@ def from_project_root( *, verify_version: bool = False, ) -> "Project": - partial = cls.partial_load(project_root, verify_version=verify_version) + partial = PartialProject.from_project_root(project_root, verify_version=verify_version) return partial.render(renderer) def hashed_name(self): diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py index 8fc4211754e..0a5be710ec4 100644 --- a/core/dbt/config/renderer.py +++ b/core/dbt/config/renderer.py @@ -107,7 +107,7 @@ def __init__( if cli_vars is None: cli_vars = {} if profile: - self.ctx_obj = TargetContext(profile, cli_vars) + self.ctx_obj = TargetContext(profile.to_target_dict(), cli_vars) else: self.ctx_obj = BaseContext(cli_vars) # type:ignore context = self.ctx_obj.to_dict() diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index a8edb9b096a..82f6278d381 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -13,7 +13,8 @@ from dbt.adapters.factory import get_relation_class_by_name, get_include_paths from dbt.helper_types import FQNPath, PathSet, DictDefaultEmptyStr from dbt.config.profile import read_user_config -from dbt.contracts.connection import AdapterRequiredConfig, Credentials +from dbt.config.project import load_raw_project +from dbt.contracts.connection import AdapterRequiredConfig, Credentials, HasCredentials from 
dbt.contracts.graph.manifest import ManifestMetadata from dbt.contracts.relation import ComponentName from dbt.ui import warning_tag @@ -30,6 +31,23 @@ from dbt.dataclass_schema import ValidationError +def load_project( + project_root: str, + version_check: bool, + profile: HasCredentials, + cli_vars: Optional[Dict[str, Any]] = None, +) -> Project: + # get the project with all of the provided information + project_renderer = DbtProjectYamlRenderer(profile, cli_vars) + project = Project.from_project_root( + project_root, project_renderer, verify_version=version_check + ) + + # Save env_vars encountered in rendering for partial parsing + project.project_env_vars = project_renderer.ctx_obj.env_vars + return project + + def _project_quoting_dict(proj: Project, profile: Profile) -> Dict[ComponentName, bool]: src: Dict[str, Any] = profile.credentials.translate_aliases(proj.quoting) result: Dict[ComponentName, bool] = {} @@ -189,28 +207,31 @@ def _get_rendered_profile( return Profile.render_from_args(args, profile_renderer, profile_name) @classmethod - def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]: - # profile_name from the project - project_root = args.project_dir if args.project_dir else os.getcwd() - version_check = bool(flags.VERSION_CHECK) - partial = Project.partial_load(project_root, verify_version=version_check) - + def get_profile( + cls: Type["RuntimeConfig"], args: Any, cli_vars: Dict[str, Any], raw_profile_name: str + ) -> Profile: # build the profile using the base renderer and the one fact we know # Note: only the named profile section is rendered. The rest of the # profile is ignored. - cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) profile_renderer = ProfileRenderer(cli_vars) - profile_name = partial.render_profile_name(profile_renderer) + profile_name = profile_renderer.render_value(raw_profile_name) profile = cls._get_rendered_profile(args, profile_renderer, profile_name) # Save env_vars encountered in rendering for partial parsing profile.profile_env_vars = profile_renderer.ctx_obj.env_vars + return profile + + @classmethod + def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]: + # profile_name from the project + project_root = args.project_dir if args.project_dir else os.getcwd() + raw_project = load_raw_project(project_root) + raw_profile_name: str = raw_project.get("profile") # type: ignore + cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) + + profile = cls.get_profile(args, cli_vars, raw_profile_name) + + project = load_project(project_root, bool(flags.VERSION_CHECK), profile, cli_vars) - # get a new renderer using our target information and render the - # project - project_renderer = DbtProjectYamlRenderer(profile, cli_vars) - project = partial.render(project_renderer) - # Save env_vars encountered in rendering for partial parsing - project.project_env_vars = project_renderer.ctx_obj.env_vars return (project, profile) # Called in main.py, lib.py, task/base.py diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py index ae2ee10baec..64fdcd935b3 100644 --- a/core/dbt/context/configured.py +++ b/core/dbt/context/configured.py @@ -16,7 +16,8 @@ class ConfiguredContext(TargetContext): config: AdapterRequiredConfig def __init__(self, config: AdapterRequiredConfig) -> None: - super().__init__(config, config.cli_vars) + super().__init__(config.to_target_dict(), config.cli_vars) + self.config = config @contextproperty def project_name(self) -> 
str: diff --git a/core/dbt/context/target.py b/core/dbt/context/target.py index d50a72e08f6..a6d587269d5 100644 --- a/core/dbt/context/target.py +++ b/core/dbt/context/target.py @@ -1,15 +1,13 @@ from typing import Any, Dict -from dbt.contracts.connection import HasCredentials - from dbt.context.base import BaseContext, contextproperty class TargetContext(BaseContext): # subclass is ConfiguredContext - def __init__(self, config: HasCredentials, cli_vars: Dict[str, Any]): + def __init__(self, target_dict: Dict[str, Any], cli_vars: Dict[str, Any]): super().__init__(cli_vars=cli_vars) - self.config = config + self.target_dict = target_dict @contextproperty def target(self) -> Dict[str, Any]: @@ -73,9 +71,4 @@ def target(self) -> Dict[str, Any]: |----------|-----------|------------------------------------------| """ - return self.config.to_target_dict() - - -def generate_target_context(config: HasCredentials, cli_vars: Dict[str, Any]) -> Dict[str, Any]: - ctx = TargetContext(config, cli_vars) - return ctx.to_dict() + return self.target_dict diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index 5f3e3854759..d787a09d429 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -10,7 +10,7 @@ import dbt.clients.system import dbt.exceptions from dbt.adapters.factory import get_adapter, register_adapter -from dbt.config import Project, Profile +from dbt.config import PartialProject, Project, Profile from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.config.utils import parse_cli_vars from dbt.clients.yaml_helper import load_yaml_text @@ -171,7 +171,7 @@ def _choose_profile_names(self) -> Optional[List[str]]: project_profile: Optional[str] = None if os.path.exists(self.project_path): try: - partial = Project.partial_load( + partial = PartialProject.from_project_root( os.path.dirname(self.project_path), verify_version=bool(flags.VERSION_CHECK), ) diff --git a/test/unit/utils.py b/test/unit/utils.py index 4cfe0519d44..a66f97d7ed9 100644 --- a/test/unit/utils.py +++ b/test/unit/utils.py @@ -56,8 +56,6 @@ def profile_from_dict(profile, profile_name, cli_vars='{}'): def project_from_dict(project, profile, packages=None, selectors=None, cli_vars='{}'): - from dbt.context.target import generate_target_context - from dbt.config import Project from dbt.config.renderer import DbtProjectYamlRenderer from dbt.config.utils import parse_cli_vars if not isinstance(cli_vars, dict): From 44b457c191bf58ad58a0f350daf72aadc4d39693 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Tue, 22 Nov 2022 17:52:59 -0500 Subject: [PATCH 05/54] DepsTask with Click (#6260) * deps with click, default --vars param, PartialProject.render_package_metadata --- core/dbt/cli/main.py | 9 +++++- core/dbt/cli/params.py | 1 + core/dbt/config/project.py | 11 +++++-- core/dbt/deps/git.py | 11 ++++--- core/dbt/deps/local.py | 10 ++++-- core/dbt/deps/resolver.py | 25 ++++++++------- core/dbt/task/base.py | 20 ++++++------ core/dbt/task/clean.py | 4 ++- core/dbt/task/debug.py | 2 +- core/dbt/task/deps.py | 63 ++++++++++++++++++++++++++++++-------- core/dbt/task/init.py | 2 +- core/dbt/tracking.py | 9 ++---- test/unit/test_deps.py | 4 +-- 13 files changed, 116 insertions(+), 55 deletions(-) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 09753a4d4cb..92f44be045b 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -10,6 +10,7 @@ from dbt.profiler import profiler from dbt.tracking import initialize_from_flags, track_run from dbt.config.runtime import load_project +from 
dbt.task.deps import DepsTask def cli_runner(): @@ -228,7 +229,13 @@ def debug(ctx, **kwargs): def deps(ctx, **kwargs): """Pull the most recent version of the dependencies listed in packages.yml""" flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + project = ctx.obj["project"] + + task = DepsTask.from_project(project, flags.VARS) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt init diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index d092d7eae51..f9fad2ebba4 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -350,6 +350,7 @@ envvar=None, help="Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. '{my_variable: my_value}'", type=YAML(), + default="{}", ) version = click.option( diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 2e60e9b7d5d..7a0eb4c8e9d 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -37,9 +37,9 @@ Project as ProjectContract, SemverString, ) -from dbt.contracts.project import PackageConfig +from dbt.contracts.project import PackageConfig, ProjectPackageMetadata from dbt.dataclass_schema import ValidationError -from .renderer import DbtProjectYamlRenderer +from .renderer import DbtProjectYamlRenderer, PackageRenderer from .selectors import ( selector_config_from_data, selector_data_from_root, @@ -289,6 +289,13 @@ def render(self, renderer: DbtProjectYamlRenderer) -> "Project": exc.path = os.path.join(self.project_root, "dbt_project.yml") raise + def render_package_metadata(self, renderer: PackageRenderer) -> ProjectPackageMetadata: + packages_data = renderer.render_data(self.packages_dict) + packages_config = package_config_from_data(packages_data) + if not self.project_name: + raise DbtProjectError(DbtProjectError("Package dbt_project.yml must have a name!")) + return ProjectPackageMetadata(self.project_name, packages_config.packages) + def check_config_path(self, project_dict, deprecated_path, exp_path): if deprecated_path in project_dict: if exp_path in project_dict: diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py index 9e86367acc4..2b08e04632f 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -3,7 +3,8 @@ from typing import List, Optional from dbt.clients import git, system -from dbt.config import Project +from dbt.config.project import PartialProject, Project +from dbt.config.renderer import PackageRenderer from dbt.contracts.project import ( ProjectPackageMetadata, GitPackage, @@ -89,7 +90,9 @@ def _checkout(self): raise return os.path.join(get_downloads_path(), dir_) - def _fetch_metadata(self, project, renderer) -> ProjectPackageMetadata: + def _fetch_metadata( + self, project: Project, renderer: PackageRenderer + ) -> ProjectPackageMetadata: path = self._checkout() if self.unpinned_msg() and self.warn_unpinned: @@ -100,8 +103,8 @@ def _fetch_metadata(self, project, renderer) -> ProjectPackageMetadata: ), log_fmt=ui.yellow("WARNING: {}"), ) - loaded = Project.from_project_root(path, renderer) - return ProjectPackageMetadata.from_project(loaded) + partial = PartialProject.from_project_root(path) + return partial.render_package_metadata(renderer) def install(self, project, renderer): dest_path = self.get_installation_path(project, renderer) diff --git a/core/dbt/deps/local.py b/core/dbt/deps/local.py index 6cb861489fa..93e2a3cc323 100644 --- a/core/dbt/deps/local.py +++ 
b/core/dbt/deps/local.py @@ -8,6 +8,8 @@ ) from dbt.events.functions import fire_event from dbt.events.types import DepsCreatingLocalSymlink, DepsSymlinkNotAvailable +from dbt.config.project import PartialProject, Project +from dbt.config.renderer import PackageRenderer class LocalPackageMixin: @@ -39,9 +41,11 @@ def resolve_path(self, project): project.project_root, ) - def _fetch_metadata(self, project, renderer): - loaded = project.from_project_root(self.resolve_path(project), renderer) - return ProjectPackageMetadata.from_project(loaded) + def _fetch_metadata( + self, project: Project, renderer: PackageRenderer + ) -> ProjectPackageMetadata: + partial = PartialProject.from_project_root(self.resolve_path(project)) + return partial.render_package_metadata(renderer) def install(self, project, renderer): src_path = self.resolve_path(project) diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 4d971c1cd9e..7313280a3ca 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -1,10 +1,10 @@ from dataclasses import dataclass, field -from typing import Dict, List, NoReturn, Union, Type, Iterator, Set +from typing import Dict, List, NoReturn, Union, Type, Iterator, Set, Any from dbt.exceptions import raise_dependency_error, InternalException -from dbt.config import Project, RuntimeConfig -from dbt.config.renderer import DbtProjectYamlRenderer +from dbt.config import Project +from dbt.config.renderer import PackageRenderer from dbt.deps.base import BasePackage, PinnedPackage, UnpinnedPackage from dbt.deps.local import LocalUnpinnedPackage from dbt.deps.git import GitUnpinnedPackage @@ -94,19 +94,19 @@ def __iter__(self) -> Iterator[UnpinnedPackage]: def _check_for_duplicate_project_names( final_deps: List[PinnedPackage], - config: Project, - renderer: DbtProjectYamlRenderer, + project: Project, + renderer: PackageRenderer, ): seen: Set[str] = set() for package in final_deps: - project_name = package.get_project_name(config, renderer) + project_name = package.get_project_name(project, renderer) if project_name in seen: raise_dependency_error( f'Found duplicate project "{project_name}". This occurs when ' "a dependency has the same project name as some other " "dependency." ) - elif project_name == config.project_name: + elif project_name == project.project_name: raise_dependency_error( "Found a dependency with the same name as the root project " f'"{project_name}". Package names must be unique in a project.' 
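The hunk below changes resolve_packages to accept a bare Project plus the raw cli_vars mapping instead of a full RuntimeConfig. A minimal sketch of the resulting call pattern (illustrative only, not part of the patch; `project` and `cli_vars` stand in for the values that load_project and the CLI would provide):

    # Hypothetical driver mirroring how DepsTask.run, later in this series,
    # invokes the new signature.
    from dbt.deps.base import downloads_directory
    from dbt.deps.resolver import resolve_packages

    def pin_packages(project, cli_vars):
        # Package metadata is fetched into a shared downloads directory
        # while the listing is resolved down to pinned versions.
        with downloads_directory():
            pinned = resolve_packages(project.packages.packages, project, cli_vars)
        return [package.nice_version_name() for package in pinned]
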
@@ -116,21 +116,24 @@ def _check_for_duplicate_project_names( def resolve_packages( - packages: List[PackageContract], config: RuntimeConfig + packages: List[PackageContract], + project: Project, + cli_vars: Dict[str, Any], ) -> List[PinnedPackage]: pending = PackageListing.from_contracts(packages) final = PackageListing() - renderer = DbtProjectYamlRenderer(config, config.cli_vars) + + renderer = PackageRenderer(cli_vars) while pending: next_pending = PackageListing() # resolve the dependency in question for package in pending: final.incorporate(package) - target = final[package].resolved().fetch_metadata(config, renderer) + target = final[package].resolved().fetch_metadata(project, renderer) next_pending.update_from(target.packages) pending = next_pending resolved = final.resolved() - _check_for_duplicate_project_names(resolved, config, renderer) + _check_for_duplicate_project_names(resolved, project, renderer) return resolved diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 1b067d79af8..699f14d6a57 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -75,10 +75,12 @@ def read_profiles(profiles_dir=None): class BaseTask(metaclass=ABCMeta): ConfigType: Union[Type[NoneConfig], Type[Project]] = NoneConfig - def __init__(self, args, config): + def __init__(self, args, config, project=None): self.args = args - self.args.single_threaded = False self.config = config + if hasattr(config, "args"): + self.config.args.single_threaded = False + self.project = config if isinstance(config, Project) else project @classmethod def pre_init_hook(cls, args): @@ -140,13 +142,13 @@ def interpret_results(self, results): return True -def get_nearest_project_dir(args): +def get_nearest_project_dir(project_dir: Optional[str]) -> str: # If the user provides an explicit project directory, use that # but don't look at parent directories. - if args.project_dir: - project_file = os.path.join(args.project_dir, "dbt_project.yml") + if project_dir: + project_file = os.path.join(project_dir, "dbt_project.yml") if os.path.exists(project_file): - return args.project_dir + return project_dir else: raise dbt.exceptions.RuntimeException( "fatal: Invalid --project-dir flag. Not a dbt project. " @@ -168,8 +170,8 @@ def get_nearest_project_dir(args): ) -def move_to_nearest_project_dir(args): - nearest_project_dir = get_nearest_project_dir(args) +def move_to_nearest_project_dir(project_dir: Optional[str]) -> str: + nearest_project_dir = get_nearest_project_dir(project_dir) os.chdir(nearest_project_dir) return nearest_project_dir @@ -183,7 +185,7 @@ def __init__(self, args, config): @classmethod def from_args(cls, args): - move_to_nearest_project_dir(args) + move_to_nearest_project_dir(args.project_dir) return super().from_args(args) diff --git a/core/dbt/task/clean.py b/core/dbt/task/clean.py index 3be80049715..68c9bdfab2e 100644 --- a/core/dbt/task/clean.py +++ b/core/dbt/task/clean.py @@ -15,6 +15,8 @@ class CleanTask(BaseTask): + # Note: CleanTask is the last task that uses UnsetProfileConfig, + # and can be deleted once CleanTask no longer requires it. ConfigType = UnsetProfileConfig def __is_project_path(self, path): @@ -35,7 +37,7 @@ def run(self): This function takes all the paths in the target file and cleans the project paths that are not protected. 
""" - move_to_nearest_project_dir(self.args) + move_to_nearest_project_dir(self.args.project_dir) if ( "dbt_modules" in self.config.clean_targets and self.config.packages_install_path not in self.config.clean_targets diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index d787a09d429..10003651a2f 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -62,7 +62,7 @@ def __init__(self, args, config): self.profiles_dir = flags.PROFILES_DIR self.profile_path = os.path.join(self.profiles_dir, "profiles.yml") try: - self.project_dir = get_nearest_project_dir(self.args) + self.project_dir = get_nearest_project_dir(self.args.project_dir) except dbt.exceptions.Exception: # we probably couldn't find a project directory. Set project dir # to whatever was given, or default to the current directory. diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 3898eb28047..8bc19040bf1 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -1,9 +1,15 @@ +from typing import Dict, Any + +from dbt import flags + import dbt.utils import dbt.deprecations import dbt.exceptions -from dbt.config import UnsetProfileConfig +from dbt.config.profile import read_user_config +from dbt.config.runtime import load_project, UnsetProfile from dbt.config.renderer import DbtProjectYamlRenderer +from dbt.config.utils import parse_cli_vars from dbt.deps.base import downloads_directory from dbt.deps.resolver import resolve_packages @@ -23,11 +29,21 @@ from dbt.task.base import BaseTask, move_to_nearest_project_dir +from dbt.config import Project +from dbt.task.base import NoneConfig + + class DepsTask(BaseTask): - ConfigType = UnsetProfileConfig + ConfigType = NoneConfig - def __init__(self, args, config: UnsetProfileConfig): - super().__init__(args=args, config=config) + def __init__( + self, + args: Any, + project: Project, + cli_vars: Dict[str, Any], + ): + super().__init__(args=args, config=None, project=project) + self.cli_vars = cli_vars def track_package_install(self, package_name: str, source_type: str, version: str) -> None: # Hub packages do not need to be hashed, as they are public @@ -39,22 +55,22 @@ def track_package_install(self, package_name: str, source_type: str, version: st package_name = dbt.utils.md5(package_name) version = dbt.utils.md5(version) dbt.tracking.track_package_install( - self.config, - self.config.args, + "deps", + self.project.hashed_name(), {"name": package_name, "source": source_type, "version": version}, ) def run(self): - system.make_directory(self.config.packages_install_path) - packages = self.config.packages.packages + system.make_directory(self.project.packages_install_path) + packages = self.project.packages.packages if not packages: fire_event(DepsNoPackagesFound()) return with downloads_directory(): - final_deps = resolve_packages(packages, self.config) + final_deps = resolve_packages(packages, self.project, self.cli_vars) - renderer = DbtProjectYamlRenderer(self.config, self.config.cli_vars) + renderer = DbtProjectYamlRenderer(None, self.cli_vars) packages_to_upgrade = [] for package in final_deps: @@ -63,7 +79,7 @@ def run(self): version = package.get_version() fire_event(DepsStartPackageInstall(package_name=package_name)) - package.install(self.config, renderer) + package.install(self.project, renderer) fire_event(DepsInstallInfo(version_name=package.nice_version_name())) if source_type == "hub": version_latest = package.get_version_latest() @@ -82,9 +98,30 @@ def run(self): fire_event(EmptyLine()) 
fire_event(DepsNotifyUpdatesAvailable(packages=packages_to_upgrade)) + @classmethod + def _get_unset_profile(cls) -> UnsetProfile: + profile = UnsetProfile() + # The profile (for warehouse connection) is not needed, but we want + # to get the UserConfig, which is also in profiles.yml + user_config = read_user_config(flags.PROFILES_DIR) + profile.user_config = user_config + return profile + @classmethod def from_args(cls, args): # deps needs to move to the project directory, as it does put files # into the modules directory - move_to_nearest_project_dir(args) - return super().from_args(args) + nearest_project_dir = move_to_nearest_project_dir(args.project_dir) + + cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) + project_root: str = args.project_dir or nearest_project_dir + profile: UnsetProfile = cls._get_unset_profile() + project = load_project(project_root, args.version_check, profile, cli_vars) + + return cls(args, project, cli_vars) + + @classmethod + def from_project(cls, project: Project, cli_vars: Dict[str, Any]) -> "DepsTask": + move_to_nearest_project_dir(project.project_root) + # TODO: remove args=None once BaseTask does not require args + return cls(None, project, cli_vars) diff --git a/core/dbt/task/init.py b/core/dbt/task/init.py index b1769d2e729..ebb097a3311 100644 --- a/core/dbt/task/init.py +++ b/core/dbt/task/init.py @@ -250,7 +250,7 @@ def run(self): self.create_profiles_dir(profiles_dir) try: - move_to_nearest_project_dir(self.args) + move_to_nearest_project_dir(self.args.project_dir) in_project = True except dbt.exceptions.RuntimeException: in_project = False diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py index 2a1611edbfb..1ca1f4d98ca 100644 --- a/core/dbt/tracking.py +++ b/core/dbt/tracking.py @@ -287,17 +287,12 @@ def get_base_invocation_context(): } -def track_package_install(config, args, options): +def track_package_install(command_name: str, project_hashed_name: Optional[str], options): assert active_user is not None, "Cannot track package installs when active user is None" invocation_data = get_base_invocation_context() - invocation_data.update( - { - "project_id": None if config is None else config.hashed_name(), - "command": args.which, - } - ) + invocation_data.update({"project_id": project_hashed_name, "command": command_name}) context = [ SelfDescribingJson(INVOCATION_SPEC, invocation_data), diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py index 53d863a3206..30639473bff 100644 --- a/test/unit/test_deps.py +++ b/test/unit/test_deps.py @@ -636,7 +636,7 @@ def test_dependency_resolution(self): {'package': 'dbt-labs-test/b', 'version': '0.2.1'}, ], }) - resolved = resolve_packages(package_config.packages, mock.MagicMock(project_name='test')) + resolved = resolve_packages(package_config.packages, mock.MagicMock(project_name='test'), {}) self.assertEqual(len(resolved), 2) self.assertEqual(resolved[0].name, 'dbt-labs-test/a') self.assertEqual(resolved[0].version, '0.1.3') @@ -650,7 +650,7 @@ def test_dependency_resolution_allow_prerelease(self): {'package': 'dbt-labs-test/b', 'version': '0.2.1'}, ], }) - resolved = resolve_packages(package_config.packages, mock.MagicMock(project_name='test')) + resolved = resolve_packages(package_config.packages, mock.MagicMock(project_name='test'), {}) self.assertEqual(resolved[0].name, 'dbt-labs-test/a') self.assertEqual(resolved[0].version, '0.1.4a1') From e91863de59ba7881a5e519f109fee446a0f49976 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 2 Dec 2022 15:23:58 -0500 
Subject: [PATCH 06/54] Set Flags from UserConfig (#6266) flags with user config, flags.WHICH from invoked_subcommand if available --- .../unreleased/Features-20221129-183239.yaml | 7 ++ core/dbt/cli/flags.py | 42 ++++++++-- tests/unit/test_cli_flags.py | 82 +++++++++++++++++++ 3 files changed, 122 insertions(+), 9 deletions(-) create mode 100644 .changes/unreleased/Features-20221129-183239.yaml create mode 100644 tests/unit/test_cli_flags.py diff --git a/.changes/unreleased/Features-20221129-183239.yaml b/.changes/unreleased/Features-20221129-183239.yaml new file mode 100644 index 00000000000..22a92ea36a7 --- /dev/null +++ b/.changes/unreleased/Features-20221129-183239.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Click CLI Flags work with UserConfig +time: 2022-11-29T18:32:39.068035-05:00 +custom: + Author: michelleark + Issue: "6327" + PR: "6266" diff --git a/core/dbt/cli/flags.py b/core/dbt/cli/flags.py index 873cdfdfa40..93717875632 100644 --- a/core/dbt/cli/flags.py +++ b/core/dbt/cli/flags.py @@ -5,8 +5,13 @@ from importlib import import_module from multiprocessing import get_context from pprint import pformat as pf +from typing import Set from click import Context, get_current_context +from click.core import ParameterSource + +from dbt.config.profile import read_user_config +from dbt.contracts.project import UserConfig if os.name != "nt": # https://bugs.python.org/issue41567 @@ -15,12 +20,12 @@ @dataclass(frozen=True) class Flags: - def __init__(self, ctx: Context = None) -> None: + def __init__(self, ctx: Context = None, user_config: UserConfig = None) -> None: if ctx is None: ctx = get_current_context() - def assign_params(ctx): + def assign_params(ctx, params_assigned_from_default): """Recursively adds all click params to flag object""" for param_name, param_value in ctx.params.items(): # N.B. 
You have to use the base MRO method (object.__setattr__) to set attributes @@ -29,21 +34,40 @@ def assign_params(ctx): if hasattr(self, param_name): raise Exception(f"Duplicate flag names found in click command: {param_name}") object.__setattr__(self, param_name.upper(), param_value) + if ctx.get_parameter_source(param_name) == ParameterSource.DEFAULT: + params_assigned_from_default.add(param_name) if ctx.parent: - assign_params(ctx.parent) + assign_params(ctx.parent, params_assigned_from_default) - assign_params(ctx) + params_assigned_from_default = set() # type: Set[str] + assign_params(ctx, params_assigned_from_default) # Get the invoked command flags - if hasattr(ctx, "invoked_subcommand") and ctx.invoked_subcommand is not None: - invoked_subcommand = getattr(import_module("dbt.cli.main"), ctx.invoked_subcommand) + invoked_subcommand_name = ( + ctx.invoked_subcommand if hasattr(ctx, "invoked_subcommand") else None + ) + if invoked_subcommand_name is not None: + invoked_subcommand = getattr(import_module("dbt.cli.main"), invoked_subcommand_name) invoked_subcommand.allow_extra_args = True invoked_subcommand.ignore_unknown_options = True invoked_subcommand_ctx = invoked_subcommand.make_context(None, sys.argv) - assign_params(invoked_subcommand_ctx) + assign_params(invoked_subcommand_ctx, params_assigned_from_default) + + if not user_config: + profiles_dir = getattr(self, "PROFILES_DIR", None) + user_config = read_user_config(profiles_dir) if profiles_dir else None + + # Overwrite default assignments with user config if available + if user_config: + for param_assigned_from_default in params_assigned_from_default: + user_config_param_value = getattr(user_config, param_assigned_from_default, None) + if user_config_param_value is not None: + object.__setattr__( + self, param_assigned_from_default.upper(), user_config_param_value + ) # Hard coded flags - object.__setattr__(self, "WHICH", ctx.info_name) + object.__setattr__(self, "WHICH", invoked_subcommand_name or ctx.info_name) object.__setattr__(self, "MP_CONTEXT", get_context("spawn")) # Support console DO NOT TRACK initiave @@ -51,7 +75,7 @@ def assign_params(ctx): self, "ANONYMOUS_USAGE_STATS", False - if os.getenv("DO_NOT_TRACK", "").lower() in (1, "t", "true", "y", "yes") + if os.getenv("DO_NOT_TRACK", "").lower() in ("1", "t", "true", "y", "yes") else True, ) diff --git a/tests/unit/test_cli_flags.py b/tests/unit/test_cli_flags.py new file mode 100644 index 00000000000..d3dedac2390 --- /dev/null +++ b/tests/unit/test_cli_flags.py @@ -0,0 +1,82 @@ +import pytest + +import click +from multiprocessing import get_context +from typing import List + +from dbt.cli.main import cli +from dbt.contracts.project import UserConfig +from dbt.cli.flags import Flags + + +class TestFlags: + def make_dbt_context(self, context_name: str, args: List[str]) -> click.Context: + ctx = cli.make_context(context_name, args) + return ctx + + @pytest.fixture(scope="class") + def run_context(self) -> click.Context: + return self.make_dbt_context("run", ["run"]) + + def test_which(self, run_context): + flags = Flags(run_context) + assert flags.WHICH == "run" + + def test_mp_context(self, run_context): + flags = Flags(run_context) + assert flags.MP_CONTEXT == get_context("spawn") + + @pytest.mark.parametrize('param', cli.params) + def test_cli_group_flags_from_params(self, run_context, param): + flags = Flags(run_context) + assert hasattr(flags, param.name.upper()) + assert getattr(flags, param.name.upper()) == run_context.params[param.name.lower()] + + 
@pytest.mark.parametrize('do_not_track,expected_anonymous_usage_stats', [ + ("1", False), + ("t", False), + ("true", False), + ("y", False), + ("yes", False), + ("false", True), + ("anything", True), + ("2", True), + ]) + def test_anonymous_usage_state(self, monkeypatch, run_context, do_not_track, expected_anonymous_usage_stats): + monkeypatch.setenv("DO_NOT_TRACK", do_not_track) + + flags = Flags(run_context) + assert flags.ANONYMOUS_USAGE_STATS == expected_anonymous_usage_stats + + def test_empty_user_config_uses_default(self, run_context): + user_config = UserConfig() + + flags = Flags(run_context, user_config) + assert flags.USE_COLORS == run_context.params['use_colors'] + + def test_none_user_config_uses_default(self, run_context): + flags = Flags(run_context, None) + assert flags.USE_COLORS == run_context.params['use_colors'] + + def test_prefer_user_config_to_default(self, run_context): + user_config = UserConfig(use_colors=False) + # ensure default value is not the same as user config + assert run_context.params['use_colors'] is not user_config.use_colors + + flags = Flags(run_context, user_config) + assert flags.USE_COLORS == user_config.use_colors + + def test_prefer_param_value_to_user_config(self): + user_config = UserConfig(use_colors=False) + context = self.make_dbt_context("run", ["--use-colors", "True", "run"]) + + flags = Flags(context, user_config) + assert flags.USE_COLORS + + def test_prefer_env_to_user_config(self, monkeypatch): + user_config = UserConfig(use_colors=False) + monkeypatch.setenv("DBT_USE_COLORS", "True") + context = self.make_dbt_context("run", ["run"]) + + flags = Flags(context, user_config) + assert flags.USE_COLORS From 77be2e4fdf8bc21129b6c143f162208ada35433f Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Mon, 5 Dec 2022 10:55:36 -0600 Subject: [PATCH 07/54] Pin ubuntu at 20.04 for some workflows (#6380) --- .changes/unreleased/Under the Hood-20221205-092217.yaml | 7 +++++++ .github/workflows/main.yml | 2 +- .github/workflows/structured-logging-schema-check.yml | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221205-092217.yaml diff --git a/.changes/unreleased/Under the Hood-20221205-092217.yaml b/.changes/unreleased/Under the Hood-20221205-092217.yaml new file mode 100644 index 00000000000..a9ebbb9699b --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221205-092217.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Pin ubuntu at 20.04 for some workflows +time: 2022-12-05T09:22:17.614875-06:00 +custom: + Author: stu-k + Issue: "9999" + PR: "6380" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4de07d83c07..257935419c8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -119,7 +119,7 @@ jobs: fail-fast: false matrix: python-version: ["3.7", "3.8", "3.9", "3.10"] - os: [ubuntu-latest] + os: [ubuntu-20.04] include: - python-version: 3.8 os: windows-latest diff --git a/.github/workflows/structured-logging-schema-check.yml b/.github/workflows/structured-logging-schema-check.yml index c99a7107c14..d8dda921ef8 100644 --- a/.github/workflows/structured-logging-schema-check.yml +++ b/.github/workflows/structured-logging-schema-check.yml @@ -22,7 +22,7 @@ jobs: # run the performance measurements on the current or default branch test-schema: name: Test Log Schema - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 env: # turns warnings into errors RUSTFLAGS: "-D warnings" From 88d2ee48136d069f55404cd45e3d9c0fec9bed97 Mon Sep 17 00:00:00 2001 
From: Ian Knox <81931810+iknox-fa@users.noreply.github.com>
Date: Thu, 8 Dec 2022 09:38:15 -0600
Subject: [PATCH 08/54] Profile works w Click (#6336)

---
 .../Under the Hood-20221204-161110.yaml       |  7 ++
 core/dbt/cli/flags.py                         |  7 ++
 core/dbt/cli/main.py                          | 30 ++++---
 core/dbt/cli/params.py                        | 13 +++
 core/dbt/config/profile.py                    | 10 +--
 core/dbt/config/runtime.py                    | 84 +++++++++++--------
 core/dbt/config/utils.py                      |  8 +-
 core/dbt/task/debug.py                        |  8 +-
 test/integration/base.py                      |  2 +-
 test/unit/test_config.py                      | 14 ++--
 10 files changed, 125 insertions(+), 58 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20221204-161110.yaml

diff --git a/.changes/unreleased/Under the Hood-20221204-161110.yaml b/.changes/unreleased/Under the Hood-20221204-161110.yaml
new file mode 100644
index 00000000000..496ffdae7b2
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20221204-161110.yaml
@@ -0,0 +1,7 @@
+kind: Under the Hood
+body: Adds Profile support to the new Click CLI
+time: 2022-12-04T16:11:10.417872-06:00
+custom:
+  Author: iknox-fa ChenyuLInx
+  Issue: "5536"
+  PR: "6336"
diff --git a/core/dbt/cli/flags.py b/core/dbt/cli/flags.py
index 93717875632..a8af62bf61d 100644
--- a/core/dbt/cli/flags.py
+++ b/core/dbt/cli/flags.py
@@ -79,5 +79,12 @@ def assign_params(ctx, params_assigned_from_default):
             else True,
         )
 
+        # Support lower-cased access for legacy code
+        params = set(
+            x for x in dir(self) if not callable(getattr(self, x)) and not x.startswith("__")
+        )
+        for param in params:
+            object.__setattr__(self, param.lower(), getattr(self, param))
+
     def __str__(self) -> str:
         return str(pf(self.__dict__))
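The flags.py hunk above mirrors every upper-cased flag onto a lower-cased alias, so legacy call sites can keep reading flags.use_colors while new code uses flags.USE_COLORS. A self-contained sketch of the technique (illustrative only, not part of the patch; the real attributes come from click params):

    class Options:
        def __init__(self):
            # Upper-cased canonical attributes, as Flags sets them.
            object.__setattr__(self, "USE_COLORS", True)
            object.__setattr__(self, "LOG_FORMAT", "json")
            # Mirror every public, non-callable attribute to a lower-cased name.
            params = set(
                x for x in dir(self) if not callable(getattr(self, x)) and not x.startswith("__")
            )
            for param in params:
                object.__setattr__(self, param.lower(), getattr(self, param))

    opts = Options()
    assert opts.use_colors is opts.USE_COLORS
    assert opts.log_format == "json"
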
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 92f44be045b..68d5cde5b85 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -6,11 +6,11 @@
 from dbt.adapters.factory import adapter_management
 from dbt.cli import params as p
 from dbt.cli.flags import Flags
+from dbt.config.runtime import load_project, load_profile
 from dbt.events.functions import setup_event_logger
 from dbt.profiler import profiler
-from dbt.tracking import initialize_from_flags, track_run
-from dbt.config.runtime import load_project
 from dbt.task.deps import DepsTask
+from dbt.tracking import initialize_from_flags, track_run
 
 
 def cli_runner():
@@ -46,6 +46,7 @@ def cli_runner():
 @p.printer_width
 @p.quiet
 @p.record_timing_info
+@p.single_threaded
 @p.static_parser
 @p.use_colors
 @p.use_experimental_parser
@@ -57,8 +58,9 @@ def cli(ctx, **kwargs):
     """An ELT tool for managing your SQL transformations and data models.
     For more documentation on these commands, visit: docs.getdbt.com
     """
-    ctx.obj = {}
+    # Get primitives
     flags = Flags()
+
     # Logging
     # N.B. Legacy logger is not supported
     setup_event_logger(
@@ -67,6 +69,7 @@ def cli(ctx, **kwargs):
         flags.USE_COLORS,
         flags.DEBUG,
     )
+
     # Tracking
     initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR)
     ctx.with_resource(track_run(run_command=ctx.invoked_subcommand))
@@ -75,11 +78,6 @@ def cli(ctx, **kwargs):
     if flags.RECORD_TIMING_INFO:
         ctx.with_resource(profiler(enable=True, outfile=flags.RECORD_TIMING_INFO))
 
-    # TODO need profile to exist
-    profile = None
-
-    # project needs the profile to render because it requires knowing the target
-    ctx.obj["project"] = load_project(flags.PROJECT_DIR, flags.VERSION_CHECK, profile, flags.VARS)
     # Adapter management
     ctx.with_resource(adapter_management())
@@ -87,8 +85,20 @@ def cli(ctx, **kwargs):
     if flags.VERSION:
         click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}")
         return
-    else:
-        del ctx.params["version"]
+
+    # Profile
+    profile = load_profile(
+        flags.PROJECT_DIR, flags.VARS, flags.PROFILE, flags.TARGET, flags.THREADS
+    )
+
+    # Project
+    project = load_project(flags.PROJECT_DIR, flags.VERSION_CHECK, profile, flags.VARS)
+
+    # Context for downstream commands
+    ctx.obj = {}
+    ctx.obj["flags"] = flags
+    ctx.obj["profile"] = profile
+    ctx.obj["project"] = project
diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
index f9fad2ebba4..137683c09b2 100644
--- a/core/dbt/cli/params.py
+++ b/core/dbt/cli/params.py
@@ -278,6 +278,19 @@
     "--show", envvar=None, help="Show a sample of the loaded data in the terminal", is_flag=True
 )
 
+# TODO: The env var is a correction!
+# The original env var was `DBT_TEST_SINGLE_THREADED`.
+# This broke the existing naming convention.
+# This will need to be communicated as a change to the community!
+#
+# N.B. This flag is only used for testing, hence it's hidden from help text.
+single_threaded = click.option(
+    "--single-threaded/--no-single-threaded",
+    envvar="DBT_SINGLE_THREADED",
+    default=False,
+    hidden=True,
+)
+
 skip_profile_setup = click.option(
     "--skip-profile-setup", "-s", envvar=None, help="Skip interactive profile setup.", is_flag=True
 )
diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py
index d8408abd0e4..542062a2f6f 100644
--- a/core/dbt/config/profile.py
+++ b/core/dbt/config/profile.py
@@ -399,11 +399,13 @@ def from_raw_profiles(
         )
 
     @classmethod
-    def render_from_args(
+    def render(
         cls,
-        args: Any,
         renderer: ProfileRenderer,
         project_profile_name: Optional[str],
+        profile_name_override: Optional[str] = None,
+        target_override: Optional[str] = None,
+        threads_override: Optional[int] = None,
     ) -> "Profile":
         """Given the raw profiles as read from disk and the name of the desired
         profile if specified, return the profile component of the runtime
@@ -419,9 +421,7 @@
         target could not be found.
         :returns Profile: The new Profile object. 
""" - threads_override = getattr(args, "threads", None) - target_override = getattr(args, "target", None) - profile_name_override = getattr(args, "profile", None) + raw_profiles = read_profile(flags.PROFILES_DIR) profile_name = cls.pick_profile_name(profile_name_override, project_profile_name) return cls.from_raw_profiles( diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 82f6278d381..b9fc8669298 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -48,6 +48,25 @@ def load_project( return project +def load_profile( + project_root: str, + cli_vars: Dict[str, Any], + profile_name_override: Optional[str] = None, + target_override: Optional[str] = None, + threads_override: Optional[int] = None, +) -> Profile: + raw_project = load_raw_project(project_root) + raw_profile_name = raw_project.get("profile") + profile_renderer = ProfileRenderer(cli_vars) + profile_name = profile_renderer.render_value(raw_profile_name) + profile = Profile.render( + profile_renderer, profile_name, profile_name_override, target_override, threads_override + ) + # Save env_vars encountered in rendering for partial parsing + profile.profile_env_vars = profile_renderer.ctx_obj.env_vars + return profile + + def _project_quoting_dict(proj: Project, profile: Profile) -> Dict[ComponentName, bool]: src: Dict[str, Any] = profile.credentials.translate_aliases(proj.quoting) result: Dict[ComponentName, bool] = {} @@ -69,6 +88,21 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig): def __post_init__(self): self.validate() + @classmethod + def get_profile( + cls, + project_root: str, + cli_vars: Dict[str, Any], + args: Any, + ) -> Profile: + return load_profile( + project_root, + cli_vars, + args.profile, + args.target, + args.threads, + ) + # Called by 'new_project' and 'from_args' @classmethod def from_parts( @@ -196,42 +230,17 @@ def validate(self): except ValidationError as e: raise DbtProjectError(validator_error_message(e)) from e - @classmethod - def _get_rendered_profile( - cls, - args: Any, - profile_renderer: ProfileRenderer, - profile_name: Optional[str], - ) -> Profile: - - return Profile.render_from_args(args, profile_renderer, profile_name) - - @classmethod - def get_profile( - cls: Type["RuntimeConfig"], args: Any, cli_vars: Dict[str, Any], raw_profile_name: str - ) -> Profile: - # build the profile using the base renderer and the one fact we know - # Note: only the named profile section is rendered. The rest of the - # profile is ignored. 
-        profile_renderer = ProfileRenderer(cli_vars)
-        profile_name = profile_renderer.render_value(raw_profile_name)
-        profile = cls._get_rendered_profile(args, profile_renderer, profile_name)
-        # Save env_vars encountered in rendering for partial parsing
-        profile.profile_env_vars = profile_renderer.ctx_obj.env_vars
-        return profile
-
     @classmethod
     def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]:
         # profile_name from the project
         project_root = args.project_dir if args.project_dir else os.getcwd()
-        raw_project = load_raw_project(project_root)
-        raw_profile_name: str = raw_project.get("profile")  # type: ignore
         cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}"))
-
-        profile = cls.get_profile(args, cli_vars, raw_profile_name)
-
+        profile = cls.get_profile(
+            project_root,
+            cli_vars,
+            args,
+        )
         project = load_project(project_root, bool(flags.VERSION_CHECK), profile, cli_vars)
-
         return (project, profile)
 
     # Called in main.py, lib.py, task/base.py
@@ -582,18 +591,25 @@ def from_parts(
         )
 
     @classmethod
-    def _get_rendered_profile(
+    def get_profile(
         cls,
+        project_root: str,
+        cli_vars: Dict[str, Any],
         args: Any,
-        profile_renderer: ProfileRenderer,
-        profile_name: Optional[str],
     ) -> Profile:
+        """
+        Moves all logic for constructing a complete UnsetProfile into this function.
+        That keeps load_profile clean for the new CLI to call, and lets us remove
+        all UnsetProfile logic once we migrate to the new Click CLI.
+        """
         profile = UnsetProfile()
         # The profile (for warehouse connection) is not needed, but we want
         # to get the UserConfig, which is also in profiles.yml
-        user_config = read_user_config(flags.PROFILES_DIR)
+        user_config = read_user_config(project_root)
         profile.user_config = user_config
+        profile_renderer = ProfileRenderer(cli_vars)
+        profile.profile_env_vars = profile_renderer.ctx_obj.env_vars
         return profile
 
     @classmethod
diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py
index 728e558ebbd..f9b02bc2eb4 100644
--- a/core/dbt/config/utils.py
+++ b/core/dbt/config/utils.py
@@ -64,7 +64,13 @@ def get_project_config(
     flags.set_from_args(args, user_config)
     if cli_vars is None:
         cli_vars = {}
-    profile = Profile.render_from_args(args, ProfileRenderer(cli_vars), profile_name)
+    profile = Profile.render(
+        ProfileRenderer(cli_vars),
+        profile_name,
+        args.PROFILE,
+        args.TARGET,
+        args.THREADS,
+    )
     # Generate a project
     project = Project.from_project_root(
         project_path,
diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py
index 10003651a2f..7d46323f40c 100644
--- a/core/dbt/task/debug.py
+++ b/core/dbt/task/debug.py
@@ -249,7 +249,13 @@ def _load_profile(self):
         renderer = ProfileRenderer(self.cli_vars)
         for profile_name in profile_names:
             try:
-                profile: Profile = Profile.render_from_args(self.args, renderer, profile_name)
+                profile: Profile = Profile.render(
+                    renderer,
+                    profile_name,
+                    self.args.profile,
+                    self.args.target,
+                    self.args.threads,
+                )
             except dbt.exceptions.DbtConfigError as exc:
                 profile_errors.append(str(exc))
             else:
diff --git a/test/integration/base.py b/test/integration/base.py
index 9726ff7d482..8b06782a334 100644
--- a/test/integration/base.py
+++ b/test/integration/base.py
@@ -540,7 +540,7 @@ def run_dbt_and_check(self, args=None, profiles_dir=True):
 
         final_args = []
 
-        if os.getenv('DBT_TEST_SINGLE_THREADED') in ('y', 'Y', '1'):
+        if os.getenv('DBT_SINGLE_THREADED') in ('y', 'Y', '1'):
             final_args.append('--single-threaded')
 
         final_args.extend(args)
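Since the three overrides on the renamed Profile.render are all optional, call sites like the two in this patch are easiest to audit with keyword arguments. A minimal sketch (assuming a profiles.yml that defines a jaffle_shop profile; the literal values stand in for --profile/--target/--threads):

    from dbt.config.profile import Profile
    from dbt.config.renderer import ProfileRenderer

    profile = Profile.render(
        renderer=ProfileRenderer({}),         # cli vars used while rendering
        project_profile_name="jaffle_shop",   # hypothetical profile name
        profile_name_override=None,           # --profile
        target_override="dev",                # --target
        threads_override=4,                   # --threads
    )

diff --git a/test/unit/test_config.py 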
b/test/unit/test_config.py index 8ca8238a7d0..f730a90d506 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -88,10 +88,10 @@ def empty_project_renderer(): class Args: def __init__(self, profiles_dir=None, threads=None, profile=None, - cli_vars=None, version_check=None, project_dir=None): + cli_vars=None, version_check=None, project_dir=None, target=None): self.profile = profile - if threads is not None: - self.threads = threads + self.threads = threads + self.target = target if profiles_dir is not None: self.profiles_dir = profiles_dir flags.PROFILES_DIR = profiles_dir @@ -404,12 +404,14 @@ def from_raw_profile_info(self, raw_profile=None, profile_name='default', **kwar def from_args(self, project_profile_name='default', **kwargs): kw = { - 'args': self.args, 'project_profile_name': project_profile_name, - 'renderer': empty_profile_renderer() + 'renderer': empty_profile_renderer(), + 'threads_override': self.args.threads, + 'target_override': self.args.target, + 'profile_name_override': self.args.profile, } kw.update(kwargs) - return dbt.config.Profile.render_from_args(**kw) + return dbt.config.Profile.render(**kw) def test_profile_simple(self): profile = self.from_args() From 1809852a0d4af020dd2a4665d425c1f29e420bc1 Mon Sep 17 00:00:00 2001 From: Ian Knox <81931810+iknox-fa@users.noreply.github.com> Date: Thu, 8 Dec 2022 09:47:08 -0600 Subject: [PATCH 09/54] `dbt run` works with Click (#6396) --- .../Under the Hood-20221206-151759.yaml | 7 +++++ core/dbt/cli/main.py | 29 ++++++++++++------- core/dbt/cli/params.py | 17 ++++++----- core/dbt/config/runtime.py | 7 ++--- core/dbt/config/utils.py | 4 +-- core/dbt/graph/cli.py | 8 +++-- core/dbt/task/base.py | 2 -- core/dbt/task/debug.py | 4 +++ core/dbt/task/deps.py | 4 +++ core/dbt/task/run_operation.py | 5 ++++ core/dbt/task/runnable.py | 13 +++++++-- test/unit/test_config.py | 8 ++--- test/unit/test_graph.py | 7 ++++- test/unit/test_graph_selection.py | 7 ++++- test/unit/test_linker.py | 7 ++++- test/unit/test_parser.py | 2 +- test/unit/utils.py | 2 +- 17 files changed, 91 insertions(+), 42 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221206-151759.yaml diff --git a/.changes/unreleased/Under the Hood-20221206-151759.yaml b/.changes/unreleased/Under the Hood-20221206-151759.yaml new file mode 100644 index 00000000000..cce61ce40bc --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221206-151759.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: '`dbt run` works with Click cli' +time: 2022-12-06T15:17:59.765623-06:00 +custom: + Author: iknox-fa + Issue: "5551" + PR: "6396" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 68d5cde5b85..4901caf56f4 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -6,10 +6,12 @@ from dbt.adapters.factory import adapter_management from dbt.cli import params as p from dbt.cli.flags import Flags +from dbt.config import RuntimeConfig from dbt.config.runtime import load_project, load_profile from dbt.events.functions import setup_event_logger from dbt.profiler import profiler from dbt.task.deps import DepsTask +from dbt.task.run import RunTask from dbt.tracking import initialize_from_flags, track_run @@ -109,10 +111,10 @@ def cli(ctx, **kwargs): @p.fail_fast @p.full_refresh @p.indirect_selection -@p.models @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.show @p.state @@ -155,10 +157,10 @@ def docs(ctx, **kwargs): @p.compile_docs @p.defer @p.exclude -@p.models @p.profile @p.profiles_dir @p.project_dir +@p.select 
@p.selector @p.state @p.target @@ -194,11 +196,11 @@ def docs_serve(ctx, **kwargs): @p.defer @p.exclude @p.full_refresh -@p.models @p.parse_only @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.state @p.target @@ -268,13 +270,13 @@ def init(ctx, **kwargs): @click.pass_context @p.exclude @p.indirect_selection -@p.models @p.output @p.output_keys @p.profile @p.profiles_dir @p.project_dir @p.resource_type +@p.select @p.selector @p.state @p.target @@ -311,10 +313,10 @@ def parse(ctx, **kwargs): @p.exclude @p.fail_fast @p.full_refresh -@p.models @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.state @p.target @@ -324,8 +326,13 @@ def parse(ctx, **kwargs): @p.version_check def run(ctx, **kwargs): """Compile SQL and execute against the current target database.""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = RunTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt run operation @@ -348,10 +355,10 @@ def run_operation(ctx, **kwargs): @click.pass_context @p.exclude @p.full_refresh -@p.models @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.show @p.state @@ -371,10 +378,10 @@ def seed(ctx, **kwargs): @click.pass_context @p.defer @p.exclude -@p.models @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.state @p.target @@ -397,11 +404,11 @@ def source(ctx, **kwargs): @source.command("freshness") @click.pass_context @p.exclude -@p.models @p.output_path # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate? @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.state @p.target @@ -420,10 +427,10 @@ def freshness(ctx, **kwargs): @p.exclude @p.fail_fast @p.indirect_selection -@p.models @p.profile @p.profiles_dir @p.project_dir +@p.select @p.selector @p.state @p.store_failures diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 137683c09b2..dcfc16e6dab 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -141,14 +141,6 @@ hidden=True, ) -models = click.option( - "-m", - "-s", - "models", - envvar=None, - help="Specify the nodes to include.", - multiple=True, -) output = click.option( "--output", @@ -270,6 +262,15 @@ default="default", ) +select = click.option( + "-m", + "-s", + "select", + envvar=None, + help="Specify the nodes to include.", + multiple=True, +) + selector = click.option( "--selector", envvar=None, help="The selector name to use, as defined in selectors.yml" ) diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index b9fc8669298..ccf95c65f7c 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -8,7 +8,6 @@ from .profile import Profile from .project import Project from .renderer import DbtProjectYamlRenderer, ProfileRenderer -from .utils import parse_cli_vars from dbt import flags from dbt.adapters.factory import get_relation_class_by_name, get_include_paths from dbt.helper_types import FQNPath, PathSet, DictDefaultEmptyStr @@ -125,7 +124,7 @@ def from_parts( .replace_dict(_project_quoting_dict(project, profile)) ).to_dict(omit_none=True) - cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) + cli_vars: Dict[str, Any] = getattr(args, "vars", {}) return cls( project_name=project.project_name, @@ -234,7 +233,7 @@ def validate(self): def collect_parts(cls: 
Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]: # profile_name from the project project_root = args.project_dir if args.project_dir else os.getcwd() - cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) + cli_vars: Dict[str, Any] = getattr(args, "vars", {}) profile = cls.get_profile( project_root, cli_vars, @@ -541,7 +540,7 @@ def from_parts( :param args: The parsed command-line arguments. :returns RuntimeConfig: The new configuration. """ - cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) + cli_vars: Dict[str, Any] = getattr(args, "vars", {}) return cls( project_name=project.project_name, diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index f9b02bc2eb4..b3be5d5501b 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -12,9 +12,9 @@ from dbt.exceptions import ValidationException, raise_compiler_error -def parse_cli_vars(var_string: str) -> Dict[str, Any]: +def parse_cli_vars(var: str) -> Dict[str, Any]: try: - cli_vars = yaml_helper.load_yaml_text(var_string) + cli_vars = yaml_helper.load_yaml_text(var) var_type = type(cli_vars) if var_type is dict: return cli_vars diff --git a/core/dbt/graph/cli.py b/core/dbt/graph/cli.py index 6059de6b042..51464912a1b 100644 --- a/core/dbt/graph/cli.py +++ b/core/dbt/graph/cli.py @@ -1,5 +1,4 @@ # special support for CLI argument parsing. -from dbt import flags from copy import deepcopy import itertools from dbt.clients.yaml_helper import yaml, Loader, Dumper # noqa: F401 @@ -71,11 +70,14 @@ def parse_union_from_default( def parse_difference( - include: Optional[List[str]], exclude: Optional[List[str]] + include: Optional[List[str]], exclude: Optional[List[str]], indirect_selection: Any ) -> SelectionDifference: + if include == (): + include = None + included = parse_union_from_default( - include, DEFAULT_INCLUDES, indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION) + include, DEFAULT_INCLUDES, indirect_selection=IndirectSelection(indirect_selection) ) excluded = parse_union_from_default( exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection.Eager diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 699f14d6a57..45f8c0fd0fd 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -78,8 +78,6 @@ class BaseTask(metaclass=ABCMeta): def __init__(self, args, config, project=None): self.args = args self.config = config - if hasattr(config, "args"): - self.config.args.single_threaded = False self.project = config if isinstance(config, Project) else project @classmethod diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index 7d46323f40c..10e3a1ad2a6 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -71,6 +71,10 @@ def __init__(self, args, config): else: self.project_dir = os.getcwd() self.project_path = os.path.join(self.project_dir, "dbt_project.yml") + # N.B. parse_cli_vars is embedded into the param when using click. + # replace this with: + # cli_vars: Dict[str, Any] = getattr(args, "vars", {}) + # when this task is refactored for click self.cli_vars = parse_cli_vars(getattr(self.args, "vars", "{}")) # set by _load_* diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 8bc19040bf1..6b3bc5fb7c4 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -113,6 +113,10 @@ def from_args(cls, args): # into the modules directory nearest_project_dir = move_to_nearest_project_dir(args.project_dir) + # N.B. parse_cli_vars is embedded into the param when using click. 
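In other words: under Click the option itself can own the YAML parsing, so tasks receive a ready dict instead of a raw string. A rough sketch of that wiring (the callback and option definition here are illustrative — the real --vars param lives in core/dbt/cli/params.py and is not shown in this diff):

    import click
    from dbt.clients import yaml_helper

    def parse_vars_callback(ctx, param, value):
        # convert the raw --vars YAML/JSON string to a dict once, at parse time
        return yaml_helper.load_yaml_text(value) if value else {}

    vars_option = click.option(
        "--vars",
        envvar=None,
        default="{}",
        callback=parse_vars_callback,
        help="Supply variables to the project as a YAML dictionary.",
    )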
+        # replace this with:
+        # cli_vars: Dict[str, Any] = getattr(args, "vars", {})
+        # when this task is refactored for click
         cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}"))
         project_root: str = args.project_dir or nearest_project_dir
         profile: UnsetProfile = cls._get_unset_profile()
diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py
index f867824c408..e7b43a837b0 100644
--- a/core/dbt/task/run_operation.py
+++ b/core/dbt/task/run_operation.py
@@ -30,6 +30,11 @@ def _get_macro_parts(self):
         return package_name, macro_name
 
     def _get_kwargs(self) -> Dict[str, Any]:
+        # N.B. parse_cli_vars is embedded into the param when using click.
+        # replace this with:
+        # return self.args.args
+        # when this task is refactored for click
+        # or remove the function completely as it's basically a noop
         return parse_cli_vars(self.args.args)
 
     def compile_manifest(self) -> None:
diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py
index af0de610c98..c6866cde2e1 100644
--- a/core/dbt/task/runnable.py
+++ b/core/dbt/task/runnable.py
@@ -137,16 +137,23 @@ def exclusion_arg(self):
 
     def get_selection_spec(self) -> SelectionSpec:
         default_selector_name = self.config.get_default_selector_name()
-        if self.args.selector_name:
+        # TODO: The "eager" string below needs to be replaced with programmatic access
+        # to the default value for the indirect selection parameter in
+        # dbt.cli.params.indirect_selection
+        #
+        # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397
+        indirect_selection = getattr(self.args, "INDIRECT_SELECTION", "eager")
+
+        if self.args.selector:
             # use pre-defined selector (--selector)
-            spec = self.config.get_selector(self.args.selector_name)
+            spec = self.config.get_selector(self.args.selector)
         elif not (self.selection_arg or self.exclusion_arg) and default_selector_name:
             # use pre-defined selector (--selector) with default: true
             fire_event(DefaultSelector(name=default_selector_name))
             spec = self.config.get_selector(default_selector_name)
         else:
             # use --select and --exclude args
-            spec = parse_difference(self.selection_arg, self.exclusion_arg)
+            spec = parse_difference(self.selection_arg, self.exclusion_arg, indirect_selection)
         return spec
 
     @abstractmethod
diff --git a/test/unit/test_config.py b/test/unit/test_config.py
index f730a90d506..697dc05a1bb 100644
--- a/test/unit/test_config.py
+++ b/test/unit/test_config.py
@@ -22,7 +22,7 @@
 from dbt.semver import VersionSpecifier
 from dbt.task.run_operation import RunOperationTask
 
-from .utils import normalize, config_from_parts_or_dicts
+from .utils import normalize
 
 INITIAL_ROOT = os.getcwd()
 
@@ -165,7 +165,7 @@ def setUp(self):
             },
             'empty_profile_data': {}
         }
-        self.args = Args(profiles_dir=self.profiles_dir, cli_vars='{}',
+        self.args = Args(profiles_dir=self.profiles_dir, cli_vars={},
                          version_check=True, project_dir=self.project_dir)
         self.env_override = {
             'env_value_type': 'postgres',
@@ -510,7 +510,7 @@ def test_invalid_env_vars(self):
 
     def test_cli_and_env_vars(self):
         self.args.target = 'cli-and-env-vars'
-        self.args.vars = '{"cli_value_host": "cli-postgres-host"}'
+        self.args.vars = {"cli_value_host": "cli-postgres-host"}
         renderer = dbt.config.renderer.ProfileRenderer({'cli_value_host': 'cli-postgres-host'})
         with mock.patch.dict(os.environ, self.env_override):
             profile = self.from_args(renderer=renderer)
@@ -1307,7 +1307,7 @@ def setUp(self):
 
     def test_cli_and_env_vars(self):
         self.args.target = 'cli-and-env-vars'
-        self.args.vars = '{"cli_value_host": "cli-postgres-host", "cli_version": "0.1.2"}'
+        self.args.vars = {"cli_value_host": "cli-postgres-host", "cli_version": "0.1.2"}
         with mock.patch.dict(os.environ, self.env_override), temp_cd(self.project_dir):
             config = dbt.config.RuntimeConfig.from_args(self.args)
diff --git a/test/unit/test_graph.py b/test/unit/test_graph.py
index 5534fe21f19..90c0141d00a 100644
--- a/test/unit/test_graph.py
+++ b/test/unit/test_graph.py
@@ -300,7 +300,12 @@ def test__dependency_list(self):
         })
         manifest.expect.side_effect = lambda n: MagicMock(unique_id=n)
         selector = NodeSelector(graph, manifest)
-        queue = selector.get_graph_queue(parse_difference(None, None))
+        # TODO: The "eager" string below needs to be replaced with programmatic access
+        # to the default value for the indirect selection parameter in
+        # dbt.cli.params.indirect_selection
+        #
+        # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397
+        queue = selector.get_graph_queue(parse_difference(None, None, "eager"))
 
         for model_id in model_ids:
             self.assertFalse(queue.empty())
diff --git a/test/unit/test_graph_selection.py b/test/unit/test_graph_selection.py
index a0da5b490e9..e45fb4de15a 100644
--- a/test/unit/test_graph_selection.py
+++ b/test/unit/test_graph_selection.py
@@ -126,7 +126,12 @@ def test_run_specs(include, exclude, expected):
     graph = _get_graph()
     manifest = _get_manifest(graph)
     selector = graph_selector.NodeSelector(graph, manifest)
-    spec = graph_cli.parse_difference(include, exclude)
+    # TODO: The "eager" string below needs to be replaced with programmatic access
+    # to the default value for the indirect selection parameter in
+    # dbt.cli.params.indirect_selection
+    #
+    # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397
+    spec = graph_cli.parse_difference(include, exclude, "eager")
 
     selected, _ = selector.select_nodes(spec)
     assert selected == expected
diff --git a/test/unit/test_linker.py b/test/unit/test_linker.py
index b3b3627ff76..273b5a2a7ef 100644
--- a/test/unit/test_linker.py
+++ b/test/unit/test_linker.py
@@ -66,7 +66,12 @@ def assert_would_join(self, queue):
     def _get_graph_queue(self, manifest, include=None, exclude=None):
         graph = compilation.Graph(self.linker.graph)
         selector = NodeSelector(graph, manifest)
-        spec = parse_difference(include, exclude)
+        # TODO: The "eager" string below needs to be replaced with programmatic access
+        # to the default value for the indirect selection parameter in
+        # dbt.cli.params.indirect_selection
+        #
+        # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397
+        spec = parse_difference(include, exclude, "eager")
         return selector.get_graph_queue(spec)
 
     def test_linker_add_dependency(self):
diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py
index 8fdf297d4cd..529fbef8b94 100644
--- a/test/unit/test_parser.py
+++ b/test/unit/test_parser.py
@@ -107,7 +107,7 @@ def setUp(self):
         self.root_project_config = config_from_parts_or_dicts(
             project=root_project,
             profile=profile_data,
-            cli_vars='{"test_schema_name": "foo"}'
+            cli_vars={"test_schema_name": "foo"}
         )
 
         snowplow_project = {
Project, Profile, RuntimeConfig from copy import deepcopy From ce1aaec31db40720b4d44f48dfb08a76013b56b9 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Mon, 12 Dec 2022 11:13:04 -0600 Subject: [PATCH 10/54] Adjust tox passenv to be multiline (#6405) (#6430) --- .changes/unreleased/Under the Hood-20221212-110859.yaml | 7 +++++++ tox.ini | 9 +++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221212-110859.yaml diff --git a/.changes/unreleased/Under the Hood-20221212-110859.yaml b/.changes/unreleased/Under the Hood-20221212-110859.yaml new file mode 100644 index 00000000000..4c4bbaa294e --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221212-110859.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Cherry pick tox fix +time: 2022-12-12T11:08:59.440276-06:00 +custom: + Author: stu-k + Issue: "9999" + PR: "6430" diff --git a/tox.ini b/tox.ini index 109e8b4f62f..84fc54c5957 100644 --- a/tox.ini +++ b/tox.ini @@ -6,7 +6,9 @@ envlist = unit,integration description = unit testing download = true skip_install = true -passenv = DBT_* PYTEST_ADDOPTS +passenv = + DBT_* + PYTEST_ADDOPTS commands = {envpython} -m pytest --cov=core {posargs} test/unit {envpython} -m pytest --cov=core {posargs} tests/unit @@ -18,7 +20,10 @@ deps = description = adapter plugin integration testing download = true skip_install = true -passenv = DBT_* POSTGRES_TEST_* PYTEST_ADDOPTS +passenv = + DBT_* + POSTGRES_TEST_* + PYTEST_ADDOPTS commands = {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration {envpython} -m pytest --cov=core {posargs} tests/functional From 5d278dacf1993908eab60420f87a9a94a20a5615 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Thu, 15 Dec 2022 11:19:16 -0600 Subject: [PATCH 11/54] Make clean task work with click (#6369) --- .../Under the Hood-20221202-123046.yaml | 7 +++ core/dbt/cli/main.py | 12 ++++- core/dbt/task/clean.py | 44 +++++++++---------- 3 files changed, 39 insertions(+), 24 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221202-123046.yaml diff --git a/.changes/unreleased/Under the Hood-20221202-123046.yaml b/.changes/unreleased/Under the Hood-20221202-123046.yaml new file mode 100644 index 00000000000..9209fea0639 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221202-123046.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: '`dbt clean` works with click cli' +time: 2022-12-02T12:30:46.711184-06:00 +custom: + Author: stu-k + Issue: "5542" + PR: "6369" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 4901caf56f4..a7bbc9d6ae6 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -10,9 +10,11 @@ from dbt.config.runtime import load_project, load_profile from dbt.events.functions import setup_event_logger from dbt.profiler import profiler +from dbt.tracking import initialize_from_flags, track_run + +from dbt.task.clean import CleanTask from dbt.task.deps import DepsTask from dbt.task.run import RunTask -from dbt.tracking import initialize_from_flags, track_run def cli_runner(): @@ -141,7 +143,13 @@ def build(ctx, **kwargs): def clean(ctx, **kwargs): """Delete all folders in the clean-targets list (usually the dbt_packages and target directories.)""" flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + project = ctx.obj["project"] + + task = CleanTask(flags, project) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt docs diff --git a/core/dbt/task/clean.py 
b/core/dbt/task/clean.py index 68c9bdfab2e..6f31fc81ecd 100644 --- a/core/dbt/task/clean.py +++ b/core/dbt/task/clean.py @@ -1,9 +1,9 @@ import os.path import os import shutil +from typing import List from dbt import deprecations -from dbt.task.base import BaseTask, move_to_nearest_project_dir from dbt.events.functions import fire_event from dbt.events.types import ( CheckCleanPath, @@ -11,27 +11,13 @@ ProtectedCleanPath, FinishedCleanPaths, ) -from dbt.config import UnsetProfileConfig +from dbt.task.base import ( + BaseTask, + move_to_nearest_project_dir, +) class CleanTask(BaseTask): - # Note: CleanTask is the last task that uses UnsetProfileConfig, - # and can be deleted once CleanTask no longer requires it. - ConfigType = UnsetProfileConfig - - def __is_project_path(self, path): - proj_path = os.path.abspath(".") - return not os.path.commonprefix([proj_path, os.path.abspath(path)]) == proj_path - - def __is_protected_path(self, path): - """ - This function identifies protected paths, so as not to clean them. - """ - abs_path = os.path.abspath(path) - protected_paths = self.config.model_paths + self.config.test_paths + ["."] - protected_abs_paths = [os.path.abspath(p) for p in protected_paths] - return abs_path in set(protected_abs_paths) or self.__is_project_path(abs_path) - def run(self): """ This function takes all the paths in the target file @@ -39,16 +25,30 @@ def run(self): """ move_to_nearest_project_dir(self.args.project_dir) if ( - "dbt_modules" in self.config.clean_targets + "dbt_modules" in self.project.clean_targets and self.config.packages_install_path not in self.config.clean_targets ): deprecations.warn("install-packages-path") - for path in self.config.clean_targets: + for path in self.project.clean_targets: fire_event(CheckCleanPath(path=path)) - if not self.__is_protected_path(path): + if not is_protected_path(path, self.project.model_paths, self.project.test_paths): shutil.rmtree(path, True) fire_event(ConfirmCleanPath(path=path)) else: fire_event(ProtectedCleanPath(path=path)) fire_event(FinishedCleanPaths()) + + +def is_protected_path(path: str, model_paths: List[str], test_paths: List[str]) -> bool: + """This function identifies protected paths.""" + abs_path = os.path.abspath(path) + protected_paths = model_paths + test_paths + ["."] + protected_abs_paths = [os.path.abspath(p) for p in protected_paths] + return abs_path in set(protected_abs_paths) or is_project_path(abs_path) + + +def is_project_path(path: str) -> bool: + """This function identifies project paths.""" + proj_path = os.path.abspath(".") + return not os.path.commonprefix([proj_path, os.path.abspath(path)]) == proj_path From b0909b8f5d801d047328c00d56a221916ba390b3 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 16 Dec 2022 19:54:42 -0500 Subject: [PATCH 12/54] Functional test framework working with Click, dbtRunner (#6387) dbtRunner, dbt.cli.requires - preflight, profile, project --- .../Under the Hood-20221214-112048.yaml | 7 + core/dbt/cli/main.py | 160 +++++++++--------- core/dbt/cli/params.py | 6 +- core/dbt/cli/requires.py | 87 ++++++++++ core/dbt/tests/fixtures/project.py | 2 +- core/dbt/tests/util.py | 5 +- tests/unit/test_dbt_runner.py | 24 +++ tox.ini | 3 +- 8 files changed, 209 insertions(+), 85 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221214-112048.yaml create mode 100644 core/dbt/cli/requires.py create mode 100644 tests/unit/test_dbt_runner.py diff --git a/.changes/unreleased/Under the Hood-20221214-112048.yaml b/.changes/unreleased/Under the 
Hood-20221214-112048.yaml
 create mode 100644 core/dbt/cli/requires.py
 create mode 100644 tests/unit/test_dbt_runner.py

diff --git a/.changes/unreleased/Under the Hood-20221214-112048.yaml b/.changes/unreleased/Under the Hood-20221214-112048.yaml
new file mode 100644
index 00000000000..9ac833b6e60
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20221214-112048.yaml
@@ -0,0 +1,7 @@
+kind: Under the Hood
+body: functional tests run using click cli through dbtRunner
+time: 2022-12-14T11:20:48.521869-05:00
+custom:
+  Author: MichelleArk
+  Issue: "6096"
+  PR: "6387"
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index a7bbc9d6ae6..ce160fb8011 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -1,22 +1,20 @@
 import inspect  # This is temporary for RAT-ing
 from copy import copy
 from pprint import pformat as pf  # This is temporary for RAT-ing
+from typing import List, Tuple, Optional
 
 import click
 
-from dbt.adapters.factory import adapter_management
-from dbt.cli import params as p
-from dbt.cli.flags import Flags
+from dbt.cli import requires, params as p
 from dbt.config import RuntimeConfig
-from dbt.config.runtime import load_project, load_profile
-from dbt.events.functions import setup_event_logger
-from dbt.profiler import profiler
-from dbt.tracking import initialize_from_flags, track_run
-
+from dbt.config.project import Project
+from dbt.config.profile import Profile
+from dbt.contracts.graph.manifest import Manifest
 from dbt.task.clean import CleanTask
 from dbt.task.deps import DepsTask
 from dbt.task.run import RunTask
 
 
+# CLI invocation
 def cli_runner():
     # Alias "list" to "ls"
     ls = copy(cli.commands["list"])
@@ -27,6 +25,31 @@ def cli_runner():
     cli()
 
 
+class dbtUsageException(Exception):
+    pass
+
+
+# Programmatic invocation
+class dbtRunner:
+    def __init__(
+        self, project: Project = None, profile: Profile = None, manifest: Manifest = None
+    ):
+        self.project = project
+        self.profile = profile
+        self.manifest = manifest
+
+    def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
+        try:
+            dbt_ctx = cli.make_context(cli.name, args)
+            dbt_ctx.obj = {}
+            dbt_ctx.obj["project"] = self.project
+            dbt_ctx.obj["profile"] = self.profile
+            dbt_ctx.obj["manifest"] = self.manifest
+            return cli.invoke(dbt_ctx)
+        except (click.NoSuchOption, click.UsageError) as e:
+            raise dbtUsageException(e.message)
+
+
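Before the diff continues into cli() below, a quick sketch of how the dbtRunner defined above is meant to be driven (the command list is illustrative; the docs added later in this series cover the same API in more depth):

    from dbt.cli.main import dbtRunner, dbtUsageException

    dbt = dbtRunner()
    try:
        # invoke() forwards to the click group and returns (results, success)
        results, success = dbt.invoke(["deps", "--project-dir", "jaffle_shop"])
    except dbtUsageException as exc:
        # raised for unknown commands or options, wrapping click's usage errors
        print(f"bad invocation: {exc}")

 # dbt
 @click.group(
     context_settings={"help_option_names": ["-h", "--help"]},
@@ -62,48 +85,11 @@ def cli(ctx, **kwargs):
     """An ELT tool for managing your SQL transformations and data models.
     For more documentation on these commands, visit: docs.getdbt.com
     """
-    # Get primitives
-    flags = Flags()
-
-    # Logging
-    # N.B. 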
Legacy logger is not supported - setup_event_logger( - flags.LOG_PATH, - flags.LOG_FORMAT, - flags.USE_COLORS, - flags.DEBUG, - ) - - # Tracking - initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR) - ctx.with_resource(track_run(run_command=ctx.invoked_subcommand)) - - # Profiling - if flags.RECORD_TIMING_INFO: - ctx.with_resource(profiler(enable=True, outfile=flags.RECORD_TIMING_INFO)) - - # Adapter management - ctx.with_resource(adapter_management()) - # Version info - if flags.VERSION: + if ctx.params["version"]: click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}") return - # Profile - profile = load_profile( - flags.PROJECT_DIR, flags.VARS, flags.PROFILE, flags.TARGET, flags.THREADS - ) - - # Project - project = load_project(flags.PROJECT_DIR, flags.VERSION_CHECK, profile, flags.VARS) - - # Context for downstream commands - ctx.obj = {} - ctx.obj["flags"] = flags - ctx.obj["profile"] = profile - ctx.obj["project"] = project - # dbt build @cli.command("build") @@ -126,10 +112,11 @@ def cli(ctx, **kwargs): @p.threads @p.vars @p.version_check +@requires.preflight def build(ctx, **kwargs): """Run all Seeds, Models, Snapshots, and tests in DAG order""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt clean @@ -140,12 +127,12 @@ def build(ctx, **kwargs): @p.project_dir @p.target @p.vars +@requires.preflight +@requires.profile +@requires.project def clean(ctx, **kwargs): """Delete all folders in the clean-targets list (usually the dbt_packages and target directories.)""" - flags = Flags() - project = ctx.obj["project"] - - task = CleanTask(flags, project) + task = CleanTask(ctx.obj["flags"], ctx.obj["project"]) results = task.run() success = task.interpret_results(results) @@ -176,10 +163,11 @@ def docs(ctx, **kwargs): @p.threads @p.vars @p.version_check +@requires.preflight def docs_generate(ctx, **kwargs): """Generate the documentation website for your project""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt docs serve @@ -192,10 +180,11 @@ def docs_generate(ctx, **kwargs): @p.project_dir @p.target @p.vars +@requires.preflight def docs_serve(ctx, **kwargs): """Serve the documentation website for your project""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt compile @@ -216,10 +205,11 @@ def docs_serve(ctx, **kwargs): @p.threads @p.vars @p.version_check +@requires.preflight def compile(ctx, **kwargs): """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the target/ directory.""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt debug @@ -232,10 +222,11 @@ def compile(ctx, **kwargs): @p.target @p.vars @p.version_check +@requires.preflight def debug(ctx, **kwargs): """Show some helpful information about dbt for debugging. 
Not to be confused with the --debug option which increases verbosity.""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt deps @@ -246,9 +237,12 @@ def debug(ctx, **kwargs): @p.project_dir @p.target @p.vars +@requires.preflight +@requires.profile +@requires.project def deps(ctx, **kwargs): """Pull the most recent version of the dependencies listed in packages.yml""" - flags = Flags() + flags = ctx.obj["flags"] project = ctx.obj["project"] task = DepsTask.from_project(project, flags.VARS) @@ -267,10 +261,11 @@ def deps(ctx, **kwargs): @p.skip_profile_setup @p.target @p.vars +@requires.preflight def init(ctx, **kwargs): """Initialize a new DBT project.""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt list @@ -289,10 +284,11 @@ def init(ctx, **kwargs): @p.state @p.target @p.vars +@requires.preflight def list(ctx, **kwargs): """List the resources in your project""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt parse @@ -308,10 +304,11 @@ def list(ctx, **kwargs): @p.vars @p.version_check @p.write_manifest +@requires.preflight def parse(ctx, **kwargs): """Parses the project and provides information on performance""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt run @@ -332,9 +329,11 @@ def parse(ctx, **kwargs): @p.threads @p.vars @p.version_check +@requires.preflight +@requires.profile +@requires.project def run(ctx, **kwargs): """Compile SQL and execute against the current target database.""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) task = RunTask(ctx.obj["flags"], config) @@ -352,10 +351,11 @@ def run(ctx, **kwargs): @p.project_dir @p.target @p.vars +@requires.preflight def run_operation(ctx, **kwargs): """Run the named macro with any supplied arguments.""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt seed @@ -375,10 +375,11 @@ def run_operation(ctx, **kwargs): @p.threads @p.vars @p.version_check +@requires.preflight def seed(ctx, **kwargs): """Load data from csv files into your data warehouse.""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt snapshot @@ -395,10 +396,11 @@ def seed(ctx, **kwargs): @p.target @p.threads @p.vars +@requires.preflight def snapshot(ctx, **kwargs): """Execute snapshots defined in your project""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt source @@ -422,10 +424,11 @@ def source(ctx, **kwargs): @p.target @p.threads @p.vars +@requires.preflight def freshness(ctx, **kwargs): """Snapshots the current freshness of the project's sources""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + 
click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # dbt test @@ -447,10 +450,11 @@ def freshness(ctx, **kwargs): @p.threads @p.vars @p.version_check +@requires.preflight def test(ctx, **kwargs): """Runs tests on data in deployed models. Run this after `dbt run`""" - flags = Flags() - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}") + click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + return None, True # Support running as a module diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index dcfc16e6dab..a4119426895 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -131,7 +131,7 @@ "--log-path", envvar="DBT_LOG_PATH", help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.", - default=Path.cwd() / "logs", + default=lambda: Path.cwd() / "logs", type=click.Path(resolve_path=True, path_type=Path), ) @@ -214,7 +214,7 @@ "--profiles-dir", envvar="DBT_PROFILES_DIR", help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/", - default=default_profiles_dir(), + default=default_profiles_dir, type=click.Path(exists=True), ) @@ -222,7 +222,7 @@ "--project-dir", envvar=None, help="Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.", - default=default_project_dir(), + default=default_project_dir, type=click.Path(exists=True), ) diff --git a/core/dbt/cli/requires.py b/core/dbt/cli/requires.py new file mode 100644 index 00000000000..690c12bfa10 --- /dev/null +++ b/core/dbt/cli/requires.py @@ -0,0 +1,87 @@ +from dbt.adapters.factory import adapter_management +from dbt.cli.flags import Flags +from dbt.config.runtime import load_project, load_profile +from dbt.events.functions import setup_event_logger +from dbt.exceptions import DbtProjectError +from dbt.profiler import profiler +from dbt.tracking import initialize_from_flags, track_run + +from click import Context +from functools import update_wrapper + + +def preflight(func): + def wrapper(*args, **kwargs): + ctx = args[0] + assert isinstance(ctx, Context) + ctx.obj = ctx.obj or {} + + # Flags + flags = Flags(ctx) + ctx.obj["flags"] = flags + + # Tracking + initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR) + ctx.with_resource(track_run(run_command=flags.WHICH)) + + # Logging + # N.B. 
Legacy logger is not supported + setup_event_logger( + flags.LOG_PATH, + flags.LOG_FORMAT, + flags.USE_COLORS, + flags.DEBUG, + ) + + # Profiling + if flags.RECORD_TIMING_INFO: + ctx.with_resource(profiler(enable=True, outfile=flags.RECORD_TIMING_INFO)) + + # Adapter management + ctx.with_resource(adapter_management()) + + return func(*args, **kwargs) + + return update_wrapper(wrapper, func) + + +def profile(func): + def wrapper(*args, **kwargs): + ctx = args[0] + assert isinstance(ctx, Context) + + if ctx.obj.get("profile") is None: + flags = ctx.obj["flags"] + # TODO: Generalize safe access to flags.THREADS: + # https://github.com/dbt-labs/dbt-core/issues/6259 + threads = getattr(flags, "THREADS", None) + profile = load_profile( + flags.PROJECT_DIR, flags.VARS, flags.PROFILE, flags.TARGET, threads + ) + ctx.obj["profile"] = profile + + return func(*args, **kwargs) + + return update_wrapper(wrapper, func) + + +def project(func): + def wrapper(*args, **kwargs): + ctx = args[0] + assert isinstance(ctx, Context) + + if ctx.obj.get("project") is None: + # TODO: Decouple target from profile, and remove the need for profile here: + # https://github.com/dbt-labs/dbt-core/issues/6257 + if not ctx.obj.get("profile"): + raise DbtProjectError("profile required for project") + + flags = ctx.obj["flags"] + project = load_project( + flags.PROJECT_DIR, flags.VERSION_CHECK, ctx.obj["profile"], flags.VARS + ) + ctx.obj["project"] = project + + return func(*args, **kwargs) + + return update_wrapper(wrapper, func) diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index fe97176cfb6..ffea566f4db 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -243,7 +243,7 @@ def selectors_yml(project_root, selectors): def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml): # The profiles.yml and dbt_project.yml should already be written out args = Namespace( - profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None + profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None, threads=None ) flags.set_from_args(args, {}) runtime_config = RuntimeConfig.from_args(args) diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py index af837c18b17..6cdc4ee5b77 100644 --- a/core/dbt/tests/util.py +++ b/core/dbt/tests/util.py @@ -8,7 +8,7 @@ from contextlib import contextmanager from dbt.adapters.factory import Adapter -from dbt.main import handle_and_check +from dbt.cli.main import dbtRunner from dbt.logger import log_manager from dbt.contracts.graph.manifest import Manifest from dbt.events.functions import fire_event, capture_stdout_logs, stop_capture_stdout_logs, reset_metadata_vars @@ -73,7 +73,8 @@ def run_dbt(args: List[str] = None, expect_pass=True): args = ["run"] print("\n\nInvoking dbt with {}".format(args)) - res, success = handle_and_check(args) + dbt = dbtRunner() + res, success = dbt.invoke(args) if expect_pass is not None: assert success == expect_pass, "dbt exit state did not match expected" diff --git a/tests/unit/test_dbt_runner.py b/tests/unit/test_dbt_runner.py new file mode 100644 index 00000000000..2e4bb5e71a3 --- /dev/null +++ b/tests/unit/test_dbt_runner.py @@ -0,0 +1,24 @@ +import pytest + +from dbt.cli.main import dbtRunner, dbtUsageException + + +class TestDbtRunner: + @pytest.fixture + def dbt(self) -> dbtRunner: + return dbtRunner() + + def test_group_invalid_option(self, dbt: dbtRunner) -> None: + with 
pytest.raises(dbtUsageException): + dbt.invoke(["--invalid-option"]) + + def test_command_invalid_option(self, dbt: dbtRunner) -> None: + with pytest.raises(dbtUsageException): + dbt.invoke(["deps", "--invalid-option"]) + + def test_invalid_command(self, dbt: dbtRunner) -> None: + with pytest.raises(dbtUsageException): + dbt.invoke(["invalid-command"]) + + def test_invoke_version(self, dbt: dbtRunner) -> None: + dbt.invoke(["--version"]) diff --git a/tox.ini b/tox.ini index 84fc54c5957..c77b9f92272 100644 --- a/tox.ini +++ b/tox.ini @@ -25,9 +25,10 @@ passenv = POSTGRES_TEST_* PYTEST_ADDOPTS commands = - {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration {envpython} -m pytest --cov=core {posargs} tests/functional {envpython} -m pytest --cov=core {posargs} tests/adapter + {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration + deps = -rdev-requirements.txt From cc5a38ec5aa96e16f0da8a9e11614299115fd20f Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Tue, 3 Jan 2023 18:46:34 -0500 Subject: [PATCH 13/54] Example click API usage (#6307) * Example python lib w click, written docs Co-authored-by: Chenyu Li --- core/dbt/cli/__init__.py | 1 + core/dbt/cli/context.py | 16 ++++++++++++++++ core/dbt/cli/example.py | 20 ++++++++++++++++++++ core/dbt/docs/source/index.rst | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 69 insertions(+) create mode 100644 core/dbt/cli/context.py create mode 100644 core/dbt/cli/example.py diff --git a/core/dbt/cli/__init__.py b/core/dbt/cli/__init__.py index e69de29bb2d..8dc5c408aa2 100644 --- a/core/dbt/cli/__init__.py +++ b/core/dbt/cli/__init__.py @@ -0,0 +1 @@ +from .main import cli as dbt_cli # noqa diff --git a/core/dbt/cli/context.py b/core/dbt/cli/context.py new file mode 100644 index 00000000000..b8f541b9ad8 --- /dev/null +++ b/core/dbt/cli/context.py @@ -0,0 +1,16 @@ +import click +from typing import Optional + +from dbt.cli.main import cli as dbt + + +def make_context(args, command=dbt) -> Optional[click.Context]: + try: + ctx = command.make_context(command.name, args) + except click.exceptions.Exit: + return None + + ctx.invoked_subcommand = ctx.protected_args[0] if ctx.protected_args else None + ctx.obj = {} + + return ctx diff --git a/core/dbt/cli/example.py b/core/dbt/cli/example.py new file mode 100644 index 00000000000..afa6820efc8 --- /dev/null +++ b/core/dbt/cli/example.py @@ -0,0 +1,20 @@ +from dbt.cli.main import dbtRunner +from dbt.config.runtime import load_profile, load_project + +if __name__ == "__main__": + project_dir = "/Users/chenyuli/git/jaffle_shop" + cli_args = ["run", "--project-dir", project_dir] + + # initialize the dbt runner + dbt = dbtRunner() + # run the command + res, success = dbt.invoke(cli_args) + + # preload profile and project + profile = load_profile(project_dir, {}, "testing-postgres") + project = load_project(project_dir, False, profile, {}) + + # initialize the runner with pre-loaded profile and project, you can also pass in a preloaded manifest + dbt = dbtRunner(profile=profile, project=project) + # run the command, this will use the pre-loaded profile and project instead of loading + res, success = dbt.invoke(cli_args) diff --git a/core/dbt/docs/source/index.rst b/core/dbt/docs/source/index.rst index d5e3c6007af..93d34a648f2 100644 --- a/core/dbt/docs/source/index.rst +++ b/core/dbt/docs/source/index.rst @@ -1,4 +1,36 @@ dbt-core's API documentation ============================ +How to invoke dbt commands in python runtime 
+--------------------------------------------
+
+Right now, the best way to invoke a command from the Python runtime is to use the `dbtRunner` we expose:
+
+.. code-block:: python
+    from dbt.cli.main import dbtRunner
+    cli_args = ['run', '--project-dir', 'jaffle_shop']
+
+    # initialize the dbt runner
+    dbt = dbtRunner()
+    # run the command
+    res, success = dbt.invoke(cli_args)
+
+You can also pass pre-constructed objects into dbtRunner, and we will use those objects instead of loading them from disk.
+
+.. code-block:: python
+
+    # preload profile and project
+    profile = load_profile(project_dir, {}, 'testing-postgres')
+    project = load_project(project_dir, False, profile, {})
+
+    # initialize the runner with pre-loaded profile and project
+    dbt = dbtRunner(profile=profile, project=project)
+    # run the command, this will use the pre-loaded profile and project instead of loading
+    res, success = dbt.invoke(cli_args)
+
+
+For the full example code, you can refer to `core/dbt/cli/example.py`
+
+API documentation
+-----------------
 .. dbt_click:: dbt.cli.main:cli

From 9bb1250869d6d004f2a81656986bd03c03cf12da Mon Sep 17 00:00:00 2001
From: Chenyu Li
Date: Fri, 6 Jan 2023 15:11:23 -0800
Subject: [PATCH 14/54] merge main to feature/click-cli (#6483)

* merge main to feature/click-cli
* fix var hash
* move back changes in feature branch for deps
* fix logging issues

---
 .bumpversion.cfg                              |    2 +-
 .changes/1.4.0-b1.md                          |   89 +
 .../1.4.0/Dependency-20220923-000646.yaml     |    6 +
 .../Dependency-20221007-000848.yaml           |    5 +-
 .../1.4.0/Dependency-20221020-000753.yaml     |    6 +
 .../1.4.0/Dependency-20221026-000910.yaml     |    6 +
 .../1.4.0/Dependency-20221205-002118.yaml     |    7 +
 .../Docs-20220908-154157.yaml                 |    1 -
 .../Docs-20221007-090656.yaml                 |    1 -
 .changes/1.4.0/Docs-20221017-171411.yaml      |    5 +
 .changes/1.4.0/Docs-20221116-155743.yaml      |    6 +
 .changes/1.4.0/Docs-20221202-150523.yaml      |    6 +
 .changes/1.4.0/Features-20220408-165459.yaml  |    7 +
 .changes/1.4.0/Features-20220817-154857.yaml  |    6 +
 .changes/1.4.0/Features-20220823-085727.yaml  |    7 +
 .../Features-20220912-125935.yaml             |    1 -
 .../Features-20220914-095625.yaml             |    1 -
 .../Features-20220925-211651.yaml             |    1 -
 .../Features-20221003-110705.yaml             |    1 -
 .changes/1.4.0/Features-20221102-150003.yaml  |    7 +
 .changes/1.4.0/Features-20221107-105018.yaml  |    8 +
 .changes/1.4.0/Features-20221114-185207.yaml  |    6 +
 .changes/1.4.0/Features-20221130-112913.yaml  |    6 +
 .changes/1.4.0/Features-20221206-150704.yaml  |    7 +
 .../Fixes-20220916-104854.yaml                |    1 -
 .../Fixes-20221010-113218.yaml                |    1 -
 .../Fixes-20221011-160715.yaml                |    1 -
 .changes/1.4.0/Fixes-20221016-173742.yaml     |    7 +
 .changes/1.4.0/Fixes-20221107-095314.yaml     |    6 +
 .changes/1.4.0/Fixes-20221115-081021.yaml     |    6 +
 .changes/1.4.0/Fixes-20221124-163419.yaml     |    7 +
 .changes/1.4.0/Fixes-20221202-164859.yaml     |    6 +
 .changes/1.4.0/Fixes-20221213-112620.yaml     |    6 +
 .changes/1.4.0/Fixes-20221214-155307.yaml     |    7 +
 .../Under the Hood-20220927-194259.yaml       |    1 -
 .../Under the Hood-20220929-134406.yaml       |    1 -
 .../Under the Hood-20221005-120310.yaml       |    1 -
 .../Under the Hood-20221007-094627.yaml       |    1 -
 .../Under the Hood-20221007-140044.yaml       |    1 -
 .../Under the Hood-20221013-181912.yaml       |    1 -
 .../1.4.0/Under the Hood-20221017-151511.yaml |    6 +
 .../1.4.0/Under the Hood-20221017-155844.yaml |    6 +
 .../1.4.0/Under the Hood-20221028-104837.yaml |    6 +
 .../1.4.0/Under the Hood-20221028-110344.yaml |    6 +
 .../1.4.0/Under the Hood-20221108-074550.yaml |    6 +
 .../1.4.0/Under the Hood-20221108-115633.yaml |    6 +
 .../1.4.0/Under the Hood-20221108-133104.yaml |    6 +
 .../1.4.0/Under the 
Hood-20221116-130037.yaml | 6 + .../1.4.0/Under the Hood-20221118-145717.yaml | 8 + .../1.4.0/Under the Hood-20221205-164948.yaml | 7 + .../1.4.0/Under the Hood-20221206-094015.yaml | 7 + .../1.4.0/Under the Hood-20221206-113053.yaml | 7 + .../1.4.0/Under the Hood-20221211-214240.yaml | 7 + .../1.4.0/Under the Hood-20221213-214106.yaml | 7 + .../Breaking Changes-20221205-141937.yaml | 9 + .../unreleased/Features-20220817-154857.yaml | 7 - .../unreleased/Fixes-20221117-220320.yaml | 7 + .../unreleased/Fixes-20221213-113915.yaml | 6 + .../Under the Hood-20221219-193435.yaml | 6 + .changie.yaml | 107 +- .gitattributes | 2 + .github/workflows/bot-changelog.yml | 4 +- .github/workflows/generate-cli-api-docs.yml | 165 + .github/workflows/main.yml | 4 +- .github/workflows/stale.yml | 11 +- .gitignore | 1 + .pre-commit-config.yaml | 2 +- CHANGELOG.md | 90 + CONTRIBUTING.md | 16 +- Dockerfile.test | 3 + core/dbt/README.md | 49 +- core/dbt/adapters/base/connections.py | 31 +- core/dbt/adapters/base/impl.py | 91 +- core/dbt/adapters/base/query_headers.py | 4 +- core/dbt/adapters/base/relation.py | 47 +- core/dbt/adapters/cache.py | 46 +- core/dbt/adapters/protocol.py | 12 +- core/dbt/adapters/sql/connections.py | 21 +- core/dbt/adapters/sql/impl.py | 7 +- core/dbt/cli/main.py | 1 - core/dbt/cli/params.py | 8 - core/dbt/clients/_jinja_blocks.py | 55 +- core/dbt/clients/git.py | 25 +- core/dbt/clients/jinja.py | 29 +- core/dbt/clients/jinja_static.py | 12 +- core/dbt/clients/system.py | 3 +- core/dbt/compilation.py | 88 +- core/dbt/config/profile.py | 16 +- core/dbt/config/project.py | 24 +- core/dbt/config/runtime.py | 89 +- core/dbt/config/utils.py | 8 +- core/dbt/constants.py | 7 + core/dbt/context/base.py | 51 +- core/dbt/context/configured.py | 7 +- core/dbt/context/docs.py | 17 +- core/dbt/context/exceptions_jinja.py | 142 + core/dbt/context/macro_resolver.py | 16 +- core/dbt/context/macros.py | 18 +- core/dbt/context/providers.py | 188 +- core/dbt/context/secret.py | 5 +- core/dbt/contracts/connection.py | 5 +- core/dbt/contracts/files.py | 6 +- core/dbt/contracts/graph/compiled.py | 235 - core/dbt/contracts/graph/manifest.py | 183 +- core/dbt/contracts/graph/metrics.py | 2 +- .../contracts/graph/{parsed.py => nodes.py} | 725 +- core/dbt/contracts/graph/unparsed.py | 2 - core/dbt/contracts/project.py | 15 +- core/dbt/contracts/relation.py | 6 +- core/dbt/contracts/results.py | 38 +- core/dbt/contracts/sql.py | 4 +- core/dbt/contracts/util.py | 51 +- core/dbt/deprecations.py | 84 +- core/dbt/deps/README.md | 6 +- core/dbt/deps/base.py | 33 +- core/dbt/deps/git.py | 34 +- core/dbt/deps/registry.py | 44 +- core/dbt/deps/resolver.py | 30 +- core/dbt/deps/tarball.py | 74 + .../docs/build/doctrees/environment.pickle | Bin 0 -> 65160 bytes core/dbt/docs/build/doctrees/index.doctree | Bin 0 -> 87794 bytes core/dbt/docs/build/html/.buildinfo | 4 + .../docs/build/html/_sources/index.rst.txt | 4 + .../_sphinx_javascript_frameworks_compat.js | 134 + .../dbt/docs/build/html/_static/alabaster.css | 701 + core/dbt/docs/build/html/_static/basic.css | 900 ++ core/dbt/docs/build/html/_static/custom.css | 1 + core/dbt/docs/build/html/_static/doctools.js | 156 + .../html/_static/documentation_options.js | 14 + core/dbt/docs/build/html/_static/file.png | Bin 0 -> 286 bytes .../docs/build/html/_static/jquery-3.6.0.js | 10881 ++++++++++++++++ core/dbt/docs/build/html/_static/jquery.js | 2 + .../docs/build/html/_static/language_data.js | 199 + core/dbt/docs/build/html/_static/minus.png | Bin 0 -> 90 bytes 
core/dbt/docs/build/html/_static/plus.png | Bin 0 -> 90 bytes core/dbt/docs/build/html/_static/pygments.css | 83 + .../docs/build/html/_static/searchtools.js | 566 + .../build/html/_static/sphinx_highlight.js | 144 + .../build/html/_static/underscore-1.13.1.js | 2042 +++ .../dbt/docs/build/html/_static/underscore.js | 6 + core/dbt/docs/build/html/genindex.html | 102 + core/dbt/docs/build/html/index.html | 855 ++ core/dbt/docs/build/html/objects.inv | Bin 0 -> 250 bytes core/dbt/docs/build/html/search.html | 121 + core/dbt/docs/build/html/searchindex.js | 1 + core/dbt/docs/source/conf.py | 2 +- core/dbt/events/README.md | 7 +- core/dbt/events/adapter_endpoint.py | 25 +- core/dbt/events/base_types.py | 53 +- core/dbt/events/contextvars.py | 84 + core/dbt/events/eventmgr.py | 212 + core/dbt/events/functions.py | 383 +- core/dbt/events/helpers.py | 16 + core/dbt/events/proto_types.py | 520 +- core/dbt/events/test_types.py | 15 - core/dbt/events/types.proto | 444 +- core/dbt/events/types.py | 1097 +- core/dbt/exceptions.py | 2485 +++- core/dbt/flags.py | 10 +- core/dbt/graph/queue.py | 10 +- core/dbt/graph/selector.py | 20 +- core/dbt/graph/selector_methods.py | 54 +- core/dbt/helper_types.py | 4 +- .../models/incremental/incremental.sql | 4 +- .../models/incremental/merge.sql | 30 +- .../models/incremental/strategies.sql | 6 +- .../macros/python_model/python.sql | 5 +- core/dbt/include/index.html | 2 +- core/dbt/lib.py | 20 +- core/dbt/logger.py | 5 - core/dbt/main.py | 27 +- core/dbt/node_types.py | 2 +- core/dbt/parser/README.md | 22 +- core/dbt/parser/analysis.py | 10 +- core/dbt/parser/base.py | 39 +- core/dbt/parser/docs.py | 15 +- core/dbt/parser/generic_test.py | 17 +- core/dbt/parser/generic_test_builders.py | 73 +- core/dbt/parser/hooks.py | 12 +- core/dbt/parser/macros.py | 18 +- core/dbt/parser/manifest.py | 247 +- core/dbt/parser/models.py | 123 +- core/dbt/parser/partial.py | 2 +- core/dbt/parser/schemas.py | 163 +- core/dbt/parser/seeds.py | 17 +- core/dbt/parser/singular_test.py | 10 +- core/dbt/parser/snapshots.py | 14 +- core/dbt/parser/sources.py | 61 +- core/dbt/parser/sql.py | 15 +- core/dbt/task/base.py | 28 +- core/dbt/task/debug.py | 4 +- core/dbt/task/deps.py | 19 +- core/dbt/task/freshness.py | 70 +- core/dbt/task/generate.py | 8 +- core/dbt/task/list.py | 20 +- core/dbt/task/printer.py | 2 + core/dbt/task/run.py | 105 +- core/dbt/task/run_operation.py | 6 +- core/dbt/task/runnable.py | 64 +- core/dbt/task/seed.py | 45 +- core/dbt/task/snapshot.py | 39 +- core/dbt/task/test.py | 86 +- core/dbt/tests/fixtures/project.py | 21 +- core/dbt/tests/util.py | 6 +- core/dbt/utils.py | 4 +- core/dbt/version.py | 2 +- core/setup.py | 9 +- docker/Dockerfile | 12 +- docker/README.md | 2 +- .../dbt/adapters/postgres/__version__.py | 2 +- .../postgres/dbt/adapters/postgres/impl.py | 31 +- plugins/postgres/setup.py | 3 +- schemas/dbt/manifest/v8.json | 4362 +++++++ .../023_exit_codes_tests/models/bad.sql | 2 - .../023_exit_codes_tests/models/dupe.sql | 8 - .../023_exit_codes_tests/models/good.sql | 8 - .../023_exit_codes_tests/models/schema.yml | 17 - .../023_exit_codes_tests/seeds-bad/data.csv | 2 - .../023_exit_codes_tests/seeds-good/data.csv | 2 - .../023_exit_codes_tests/snapshots-bad/b.sql | 4 - .../023_exit_codes_tests/snapshots-good/g.sql | 4 - .../023_exit_codes_tests/test_exit_codes.py | 200 - .../models/statement_actual.sql | 23 - .../seed/statement_expected.csv | 3 - .../030_statement_tests/test_statements.py | 36 - .../031_thread_count_tests/models/.gitkeep | 0 
.../models/do_nothing_1.sql | 1 - .../models/do_nothing_10.sql | 1 - .../models/do_nothing_11.sql | 1 - .../models/do_nothing_12.sql | 1 - .../models/do_nothing_13.sql | 1 - .../models/do_nothing_14.sql | 1 - .../models/do_nothing_15.sql | 1 - .../models/do_nothing_16.sql | 1 - .../models/do_nothing_17.sql | 1 - .../models/do_nothing_18.sql | 1 - .../models/do_nothing_19.sql | 1 - .../models/do_nothing_2.sql | 1 - .../models/do_nothing_20.sql | 1 - .../models/do_nothing_3.sql | 1 - .../models/do_nothing_4.sql | 1 - .../models/do_nothing_5.sql | 1 - .../models/do_nothing_6.sql | 1 - .../models/do_nothing_7.sql | 1 - .../models/do_nothing_8.sql | 1 - .../models/do_nothing_9.sql | 1 - .../test_thread_count.py | 28 - .../macros/sad_macros.sql | 7 - .../044_run_operations_tests/models/model.sql | 1 - .../test_run_operations.py | 76 - .../049_dbt_debug_tests/models/model.sql | 1 - .../049_dbt_debug_tests/test_debug.py | 158 - .../models/do_nothing_then_fail.sql | 1 - .../test_no_use_colors.py | 29 - .../061_use_colors_tests/test_use_colors.py | 29 - .../062_defer_state_tests/test_defer_state.py | 124 + ..._characters_incremental_abcdefghijklmn.sql | 9 - ...characters_abcdefghijklmnopqrstuvwxyz0.sql | 8 - ...abcdefghijklmnopqrstuvwxyz012345678901.sql | 8 - ...bcdefghijklmnopqrstuvwxyz0123456789012.sql | 8 - .../063_relation_name_tests/seeds/seed.csv | 4 - .../test_relation_name.py | 74 - .../models-invalid/invalid_columns_type.sql | 10 - .../models-invalid/invalid_type.sql | 10 - .../models-invalid/invalid_unique_config.sql | 10 - .../models-invalid/missing_columns.sql | 10 - .../models/incremental.sql | 18 - .../065_postgres_index_tests/models/table.sql | 14 - .../065_postgres_index_tests/seeds/seed.csv | 4 - .../snapshots/colors.sql | 29 - .../test_postgres_indexes.py | 134 - .../models/fine_model.sql | 1 - ...odel_but_with_a_no_good_very_long_name.sql | 1 - .../models/problematic_model.sql | 11 - .../models/schema.yml | 40 - .../expected/expected_accepted_values.csv | 3 - .../seeds/expected/expected_failing_test.csv | 11 - ...expected_not_null_problematic_model_id.csv | 3 - .../expected_unique_problematic_model_id.csv | 3 - .../seeds/people.csv | 11 - .../test_store_test_failures.py | 91 - .../tests/failing_test.sql | 1 - .../tests/passing_test.sql | 2 - .../models/incremental_append_new_columns.sql | 29 - ...remental_append_new_columns_remove_one.sql | 28 - ...l_append_new_columns_remove_one_target.sql | 19 - .../incremental_append_new_columns_target.sql | 19 - .../models/incremental_fail.sql | 19 - .../models/incremental_ignore.sql | 19 - .../models/incremental_ignore_target.sql | 15 - .../models/incremental_sync_all_columns.sql | 31 - .../incremental_sync_all_columns_target.sql | 20 - .../models/incremental_sync_remove_only.sql | 29 - .../incremental_sync_remove_only_target.sql | 17 - .../models/model_a.sql | 22 - .../models/schema.yml | 54 - .../test_incremental_schema.py | 88 - .../tests/select_from_a.sql | 1 - ...ct_from_incremental_append_new_columns.sql | 1 - ..._incremental_append_new_columns_target.sql | 1 - .../tests/select_from_incremental_ignore.sql | 1 - .../select_from_incremental_ignore_target.sql | 1 - ...lect_from_incremental_sync_all_columns.sql | 1 - ...om_incremental_sync_all_columns_target.sql | 1 - test/integration/base.py | 15 +- test/unit/test_compiler.py | 48 +- test/unit/test_config.py | 56 +- test/unit/test_context.py | 19 +- test/unit/test_contracts_graph_compiled.py | 44 +- test/unit/test_contracts_graph_parsed.py | 188 +- 
test/unit/test_contracts_graph_unparsed.py | 15 - test/unit/test_deps.py | 43 +- test/unit/test_docs_blocks.py | 10 +- test/unit/test_flags.py | 15 - test/unit/test_graph_selector_methods.py | 71 +- test/unit/test_macro_resolver.py | 6 +- test/unit/test_manifest.py | 85 +- test/unit/test_node_types.py | 2 +- test/unit/test_parser.py | 485 +- test/unit/test_partial_parsing.py | 8 +- test/unit/utils.py | 23 +- tests/CONVERTING.md | 2 +- .../adapter/dbt/tests/adapter/__version__.py | 2 +- .../dbt/tests/adapter/aliases/test_aliases.py | 24 +- .../tests/adapter/dbt_debug/test_dbt_debug.py | 107 + .../test_incremental_predicates.py | 154 + tests/adapter/setup.py | 2 +- .../artifacts/data/state/v8/manifest.json | 1 + .../functional/artifacts/expected_manifest.py | 122 +- .../artifacts/test_previous_version_state.py | 2 +- tests/functional/colors/test_colors.py | 43 + .../context_methods/test_builtin_functions.py | 12 +- .../duplicates/test_duplicate_model.py | 4 +- tests/functional/exit_codes/fixtures.py | 78 + .../functional/exit_codes/test_exit_codes.py | 124 + tests/functional/exposures/fixtures.py | 25 + .../exposures/test_exposure_configs.py | 8 +- tests/functional/exposures/test_exposures.py | 15 + .../incremental_schema_tests/fixtures.py | 395 + .../test_incremental_schema.py | 136 + tests/functional/list/test_list.py | 1 - tests/functional/logging/test_logging.py | 51 + .../partial_parsing/test_pp_docs.py | 4 +- tests/functional/postgres/fixtures.py | 134 + .../postgres/test_postgres_indexes.py | 149 + .../relation_names/test_relation_name.py | 124 + .../functional/run_operations/fixtures.py | 18 +- .../run_operations/test_run_operations.py | 104 + .../schema_tests/test_schema_v2_tests.py | 6 +- .../functional/statements/fixtures.py | 39 +- .../functional/statements/test_statements.py | 43 + .../store_test_failures_tests/fixtures.py | 126 + .../test_store_test_failures.py | 152 + .../functional/threading/test_thread_count.py | 46 + tests/unit/test_events.py | 512 +- tests/unit/test_proto_events.py | 22 +- tests/unit/test_version.py | 8 +- tox.ini | 9 +- 358 files changed, 30635 insertions(+), 6772 deletions(-) create mode 100644 .changes/1.4.0-b1.md create mode 100644 .changes/1.4.0/Dependency-20220923-000646.yaml rename .changes/{unreleased => 1.4.0}/Dependency-20221007-000848.yaml (71%) create mode 100644 .changes/1.4.0/Dependency-20221020-000753.yaml create mode 100644 .changes/1.4.0/Dependency-20221026-000910.yaml create mode 100644 .changes/1.4.0/Dependency-20221205-002118.yaml rename .changes/{unreleased => 1.4.0}/Docs-20220908-154157.yaml (90%) rename .changes/{unreleased => 1.4.0}/Docs-20221007-090656.yaml (91%) create mode 100644 .changes/1.4.0/Docs-20221017-171411.yaml create mode 100644 .changes/1.4.0/Docs-20221116-155743.yaml create mode 100644 .changes/1.4.0/Docs-20221202-150523.yaml create mode 100644 .changes/1.4.0/Features-20220408-165459.yaml create mode 100644 .changes/1.4.0/Features-20220817-154857.yaml create mode 100644 .changes/1.4.0/Features-20220823-085727.yaml rename .changes/{unreleased => 1.4.0}/Features-20220912-125935.yaml (92%) rename .changes/{unreleased => 1.4.0}/Features-20220914-095625.yaml (93%) rename .changes/{unreleased => 1.4.0}/Features-20220925-211651.yaml (92%) rename .changes/{unreleased => 1.4.0}/Features-20221003-110705.yaml (92%) create mode 100644 .changes/1.4.0/Features-20221102-150003.yaml create mode 100644 .changes/1.4.0/Features-20221107-105018.yaml create mode 100644 .changes/1.4.0/Features-20221114-185207.yaml create mode 100644 
.changes/1.4.0/Features-20221130-112913.yaml create mode 100644 .changes/1.4.0/Features-20221206-150704.yaml rename .changes/{unreleased => 1.4.0}/Fixes-20220916-104854.yaml (92%) rename .changes/{unreleased => 1.4.0}/Fixes-20221010-113218.yaml (92%) rename .changes/{unreleased => 1.4.0}/Fixes-20221011-160715.yaml (92%) create mode 100644 .changes/1.4.0/Fixes-20221016-173742.yaml create mode 100644 .changes/1.4.0/Fixes-20221107-095314.yaml create mode 100644 .changes/1.4.0/Fixes-20221115-081021.yaml create mode 100644 .changes/1.4.0/Fixes-20221124-163419.yaml create mode 100644 .changes/1.4.0/Fixes-20221202-164859.yaml create mode 100644 .changes/1.4.0/Fixes-20221213-112620.yaml create mode 100644 .changes/1.4.0/Fixes-20221214-155307.yaml rename .changes/{unreleased => 1.4.0}/Under the Hood-20220927-194259.yaml (91%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20220929-134406.yaml (93%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221005-120310.yaml (92%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221007-094627.yaml (91%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221007-140044.yaml (91%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221013-181912.yaml (93%) create mode 100644 .changes/1.4.0/Under the Hood-20221017-151511.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221017-155844.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221028-104837.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221028-110344.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221108-074550.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221108-115633.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221108-133104.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221116-130037.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221118-145717.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221205-164948.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221206-094015.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221206-113053.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221211-214240.yaml create mode 100644 .changes/1.4.0/Under the Hood-20221213-214106.yaml create mode 100644 .changes/unreleased/Breaking Changes-20221205-141937.yaml delete mode 100644 .changes/unreleased/Features-20220817-154857.yaml create mode 100644 .changes/unreleased/Fixes-20221117-220320.yaml create mode 100644 .changes/unreleased/Fixes-20221213-113915.yaml create mode 100644 .changes/unreleased/Under the Hood-20221219-193435.yaml create mode 100644 .gitattributes create mode 100644 .github/workflows/generate-cli-api-docs.yml create mode 100644 core/dbt/context/exceptions_jinja.py delete mode 100644 core/dbt/contracts/graph/compiled.py rename core/dbt/contracts/graph/{parsed.py => nodes.py} (65%) create mode 100644 core/dbt/deps/tarball.py create mode 100644 core/dbt/docs/build/doctrees/environment.pickle create mode 100644 core/dbt/docs/build/doctrees/index.doctree create mode 100644 core/dbt/docs/build/html/.buildinfo create mode 100644 core/dbt/docs/build/html/_sources/index.rst.txt create mode 100644 core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js create mode 100644 core/dbt/docs/build/html/_static/alabaster.css create mode 100644 core/dbt/docs/build/html/_static/basic.css create mode 100644 core/dbt/docs/build/html/_static/custom.css create mode 100644 core/dbt/docs/build/html/_static/doctools.js create mode 100644 core/dbt/docs/build/html/_static/documentation_options.js 
create mode 100644 core/dbt/docs/build/html/_static/file.png create mode 100644 core/dbt/docs/build/html/_static/jquery-3.6.0.js create mode 100644 core/dbt/docs/build/html/_static/jquery.js create mode 100644 core/dbt/docs/build/html/_static/language_data.js create mode 100644 core/dbt/docs/build/html/_static/minus.png create mode 100644 core/dbt/docs/build/html/_static/plus.png create mode 100644 core/dbt/docs/build/html/_static/pygments.css create mode 100644 core/dbt/docs/build/html/_static/searchtools.js create mode 100644 core/dbt/docs/build/html/_static/sphinx_highlight.js create mode 100644 core/dbt/docs/build/html/_static/underscore-1.13.1.js create mode 100644 core/dbt/docs/build/html/_static/underscore.js create mode 100644 core/dbt/docs/build/html/genindex.html create mode 100644 core/dbt/docs/build/html/index.html create mode 100644 core/dbt/docs/build/html/objects.inv create mode 100644 core/dbt/docs/build/html/search.html create mode 100644 core/dbt/docs/build/html/searchindex.js create mode 100644 core/dbt/events/contextvars.py create mode 100644 core/dbt/events/eventmgr.py create mode 100644 core/dbt/events/helpers.py create mode 100644 schemas/dbt/manifest/v8.json delete mode 100644 test/integration/023_exit_codes_tests/models/bad.sql delete mode 100644 test/integration/023_exit_codes_tests/models/dupe.sql delete mode 100644 test/integration/023_exit_codes_tests/models/good.sql delete mode 100644 test/integration/023_exit_codes_tests/models/schema.yml delete mode 100644 test/integration/023_exit_codes_tests/seeds-bad/data.csv delete mode 100644 test/integration/023_exit_codes_tests/seeds-good/data.csv delete mode 100644 test/integration/023_exit_codes_tests/snapshots-bad/b.sql delete mode 100644 test/integration/023_exit_codes_tests/snapshots-good/g.sql delete mode 100644 test/integration/023_exit_codes_tests/test_exit_codes.py delete mode 100644 test/integration/030_statement_tests/models/statement_actual.sql delete mode 100644 test/integration/030_statement_tests/seed/statement_expected.csv delete mode 100644 test/integration/030_statement_tests/test_statements.py delete mode 100644 test/integration/031_thread_count_tests/models/.gitkeep delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_1.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_10.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_11.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_12.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_13.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_14.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_15.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_16.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_17.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_18.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_19.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_2.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_20.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_3.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_4.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_5.sql delete mode 100644 
test/integration/031_thread_count_tests/models/do_nothing_6.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_7.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_8.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_9.sql delete mode 100644 test/integration/031_thread_count_tests/test_thread_count.py delete mode 100644 test/integration/044_run_operations_tests/macros/sad_macros.sql delete mode 100644 test/integration/044_run_operations_tests/models/model.sql delete mode 100644 test/integration/044_run_operations_tests/test_run_operations.py delete mode 100644 test/integration/049_dbt_debug_tests/models/model.sql delete mode 100644 test/integration/049_dbt_debug_tests/test_debug.py delete mode 100644 test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql delete mode 100644 test/integration/061_use_colors_tests/test_no_use_colors.py delete mode 100644 test/integration/061_use_colors_tests/test_use_colors.py delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql delete mode 100644 test/integration/063_relation_name_tests/seeds/seed.csv delete mode 100644 test/integration/063_relation_name_tests/test_relation_name.py delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql delete mode 100644 test/integration/065_postgres_index_tests/models/incremental.sql delete mode 100644 test/integration/065_postgres_index_tests/models/table.sql delete mode 100644 test/integration/065_postgres_index_tests/seeds/seed.csv delete mode 100644 test/integration/065_postgres_index_tests/snapshots/colors.sql delete mode 100644 test/integration/065_postgres_index_tests/test_postgres_indexes.py delete mode 100644 test/integration/067_store_test_failures_tests/models/fine_model.sql delete mode 100644 test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql delete mode 100644 test/integration/067_store_test_failures_tests/models/problematic_model.sql delete mode 100644 test/integration/067_store_test_failures_tests/models/schema.yml delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv delete mode 100644 test/integration/067_store_test_failures_tests/seeds/people.csv delete mode 100644 test/integration/067_store_test_failures_tests/test_store_test_failures.py delete mode 100644 
test/integration/067_store_test_failures_tests/tests/failing_test.sql delete mode 100644 test/integration/067_store_test_failures_tests/tests/passing_test.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_fail.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_ignore.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/model_a.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/schema.yml delete mode 100644 test/integration/070_incremental_schema_tests/test_incremental_schema.py delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_a.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql create mode 100644 tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py create mode 100644 tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py create mode 100644 tests/functional/artifacts/data/state/v8/manifest.json create mode 100644 tests/functional/colors/test_colors.py create mode 100644 tests/functional/exit_codes/fixtures.py create mode 100644 tests/functional/exit_codes/test_exit_codes.py create mode 100644 tests/functional/incremental_schema_tests/fixtures.py create mode 100644 tests/functional/incremental_schema_tests/test_incremental_schema.py create mode 100644 tests/functional/logging/test_logging.py create mode 100644 tests/functional/postgres/fixtures.py create mode 100644 tests/functional/postgres/test_postgres_indexes.py create mode 100644 tests/functional/relation_names/test_relation_name.py rename test/integration/044_run_operations_tests/macros/happy_macros.sql => tests/functional/run_operations/fixtures.py (82%) create mode 100644 tests/functional/run_operations/test_run_operations.py rename test/integration/030_statement_tests/seed/seed.csv => tests/functional/statements/fixtures.py (89%) 
create mode 100644 tests/functional/statements/test_statements.py
create mode 100644 tests/functional/store_test_failures_tests/fixtures.py
create mode 100644 tests/functional/store_test_failures_tests/test_store_test_failures.py
create mode 100644 tests/functional/threading/test_thread_count.py

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 02ea0717225..3cdca1ad352 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 1.4.0a1
+current_version = 1.4.0b1
 parse = (?P<major>\d+)
 	\.(?P<minor>\d+)
 	\.(?P<patch>\d+)
diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md
new file mode 100644
index 00000000000..b2a0e96827c
--- /dev/null
+++ b/.changes/1.4.0-b1.md
@@ -0,0 +1,89 @@
+## dbt-core 1.4.0-b1 - December 15, 2022
+
+### Features
+
+- Added favor-state flag to optionally favor state nodes even if an unselected node exists (example below) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
+- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610))
+- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486))
+- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521))
+- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929))
+- Extend the -f flag shorthand to the seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990))
+- Pull the profile name from args when constructing a RuntimeConfig in lib.py, enabling dbt-server to override the value in dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201))
+- Add a tarball install method for packages, allowing a package tarball to be specified via URL in packages.yml (example below) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205))
+- Added an md5 function to the base context (example below) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246))
+- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057))
+- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147))
+- Support incremental predicates for incremental models (example below) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680))
+
+### Fixes
+
+- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992))
+- Add validation of the enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030))
+- Check the length of the Python model function's args before accessing them ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041))
+- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436))
+- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625))
+- Clarify the error message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245))
+- Make it possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309))
+- Use the full path for writing the manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055))
+- [CT-1284] Change the Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345))
+- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434))
+
+### Docs
+
+- Minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791))
+- Generate API docs for the new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528))
+- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880))
+- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323))
+- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368))
+
+### Under the Hood
+
+- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946))
+- Added the flat_graph attribute to the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809))
+- Add mypy configs so `mypy` passes from the CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983))
+- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023))
+- Add the dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028))
+- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229))
+- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068))
+- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350))
+- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171))
+- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173))
+- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942))
+- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770))
+- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771))
+- Add a GitHub Actions workflow to generate high-level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942))
+- Functionality-neutral refactor of the event logging system to improve encapsulation and modularity. ([#6139](https://github.com/dbt-labs/dbt-core/issues/6139))
+- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383))
+- Prevent the doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386))
+- Fix intermittent database connection failure in the Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394))
+- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426))
+- Restore important legacy logging behaviors, following the refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437))
+
+### Dependencies
+
+- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917))
+- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019))
+- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108))
+- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144))
+- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904))
+
+### Contributors
+- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791))
+- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041))
+- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
+- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990))
+- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394))
+- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309))
+- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068))
+- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246))
+- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486))
+- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
+- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147))
+- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245))
+- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350))
+- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028))
+- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880))
+- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929))
+- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201))
+- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205))
+- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680))
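To make the favor-state flag concrete: it composes with node deferral. Given `--defer` and `--state`, dbt resolves refs to unselected nodes from the state manifest, and `--favor-state` prefers the state-manifest version even when the node also exists in the current environment. A sketch of the CLI shape, assuming production artifacts have been copied to `./prod-artifacts`:

```sh
# Run only modified models; resolve their unselected parents from prod state,
# preferring the state manifest's version of those parents.
dbt run --select state:modified+ --defer --favor-state --state ./prod-artifacts
```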
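The new tarball install method is easiest to see in packages.yml. A minimal sketch; the URL and name are illustrative placeholders, with `name` being the local identifier dbt uses for the unpacked package:

```yaml
packages:
  # Hypothetical URL; any hosted tarball of a dbt package works.
  - tarball: "https://example.com/dbt_my_package-0.1.0.tar.gz"
    # Required for the tarball method: the local name for the package.
    name: "dbt_my_package"
```

`dbt deps` then downloads and unpacks the archive like any other package source.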
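The md5 helper hashes a string in Python at parse/compile time rather than in the warehouse. The changelog entry does not name the context member; in released dbt-core 1.4 it surfaced as `local_md5`, so treat the name in this sketch as something to verify against your version:

```sql
-- local_md5 executes in Python during parsing, not in the database.
{% set fingerprint = local_md5("events|" ~ target.name) %}

select
    '{{ fingerprint }}' as config_fingerprint,
    count(*) as n_rows
from {{ ref('events') }}
```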
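Incremental predicates let a model pass extra filter expressions into the incremental merge statement, typically to bound how much of the target table the warehouse scans. A sketch for the default merge strategy, assuming Snowflake-style `dateadd`; `DBT_INTERNAL_DEST` is the alias dbt's built-in merge macro gives the target table, and each predicate string is passed through to the generated SQL verbatim:

```sql
{{
    config(
        materialized = 'incremental',
        unique_key = 'id',
        incremental_strategy = 'merge',
        incremental_predicates = [
            "DBT_INTERNAL_DEST.event_time > dateadd(day, -7, current_date)"
        ]
    )
}}

select * from {{ ref('events') }}
{% if is_incremental() %}
-- Standard incremental filter on the source side; the predicate above
-- additionally limits the scan of the already-built target table.
where event_time > (select max(event_time) from {{ this }})
{% endif %}
```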
diff --git a/.changes/1.4.0/Dependency-20220923-000646.yaml b/.changes/1.4.0/Dependency-20220923-000646.yaml
new file mode 100644
index 00000000000..0375eeb125f
--- /dev/null
+++ b/.changes/1.4.0/Dependency-20220923-000646.yaml
@@ -0,0 +1,6 @@
+kind: "Dependencies"
+body: "Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core"
+time: 2022-09-23T00:06:46.00000Z
+custom:
+  Author: dependabot[bot]
+  PR: "5917"
diff --git a/.changes/unreleased/Dependency-20221007-000848.yaml b/.changes/1.4.0/Dependency-20221007-000848.yaml
similarity index 71%
rename from .changes/unreleased/Dependency-20221007-000848.yaml
rename to .changes/1.4.0/Dependency-20221007-000848.yaml
index 8b2aebdc466..7e36733d14e 100644
--- a/.changes/unreleased/Dependency-20221007-000848.yaml
+++ b/.changes/1.4.0/Dependency-20221007-000848.yaml
@@ -1,7 +1,6 @@
-kind: "Dependency"
+kind: "Dependencies"
 body: "Bump black from 22.8.0 to 22.10.0"
 time: 2022-10-07T00:08:48.00000Z
 custom:
   Author: dependabot[bot]
-  Issue: 4904
-  PR: 6019
+  PR: "6019"
diff --git a/.changes/1.4.0/Dependency-20221020-000753.yaml b/.changes/1.4.0/Dependency-20221020-000753.yaml
new file mode 100644
index 00000000000..ce0f122826b
--- /dev/null
+++ b/.changes/1.4.0/Dependency-20221020-000753.yaml
@@ -0,0 +1,6 @@
+kind: "Dependencies"
+body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core"
+time: 2022-10-20T00:07:53.00000Z
+custom:
+  Author: dependabot[bot]
+  PR: "6108"
diff --git a/.changes/1.4.0/Dependency-20221026-000910.yaml b/.changes/1.4.0/Dependency-20221026-000910.yaml
new file mode 100644
index 00000000000..d68fa8a11ef
--- /dev/null
+++ b/.changes/1.4.0/Dependency-20221026-000910.yaml
@@ -0,0 +1,6 @@
+kind: "Dependencies"
+body: "Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core"
+time: 2022-10-26T00:09:10.00000Z
+custom:
+  Author: dependabot[bot]
+  PR: "6144"
diff --git a/.changes/1.4.0/Dependency-20221205-002118.yaml b/.changes/1.4.0/Dependency-20221205-002118.yaml
new file mode 100644
index 00000000000..f4203a5285c
--- /dev/null
+++ b/.changes/1.4.0/Dependency-20221205-002118.yaml
@@ -0,0 +1,7 @@
+kind: "Dependencies"
+body: "Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core"
+time: 2022-12-05T00:21:18.00000Z
+custom:
+  Author: dependabot[bot]
+  Issue: 4904
+  PR: 6375
diff --git a/.changes/unreleased/Docs-20220908-154157.yaml b/.changes/1.4.0/Docs-20220908-154157.yaml
similarity index 90%
rename from .changes/unreleased/Docs-20220908-154157.yaml
rename to .changes/1.4.0/Docs-20220908-154157.yaml
index 2b2d30d41e5..e307f3bd5e0 100644
--- a/.changes/unreleased/Docs-20220908-154157.yaml
+++ b/.changes/1.4.0/Docs-20220908-154157.yaml
@@ -4,4 +4,3 @@ time: 2022-09-08T15:41:57.689162-04:00
 custom:
   Author: andy-clapson
   Issue: "5791"
-  PR: "5684"
diff --git a/.changes/unreleased/Docs-20221007-090656.yaml b/.changes/1.4.0/Docs-20221007-090656.yaml
similarity index 91%
rename from .changes/unreleased/Docs-20221007-090656.yaml
rename to .changes/1.4.0/Docs-20221007-090656.yaml
index 1159879a249..070ecd48944 100644
--- a/.changes/unreleased/Docs-20221007-090656.yaml
+++ b/.changes/1.4.0/Docs-20221007-090656.yaml
@@ -4,4 +4,3 @@ time: 2022-10-07T09:06:56.446078-05:00
 custom:
   Author: stu-k
   Issue: "5528"
-  PR: "6022"
diff --git a/.changes/1.4.0/Docs-20221017-171411.yaml b/.changes/1.4.0/Docs-20221017-171411.yaml
new file mode 100644
index 00000000000..487362c1d5c
--- /dev/null
+++ b/.changes/1.4.0/Docs-20221017-171411.yaml
@@ -0,0 +1,5 @@
+kind: Docs
+time:
2022-10-17T17:14:11.715348-05:00 +custom: + Author: paulbenschmidt + Issue: "5880" diff --git a/.changes/1.4.0/Docs-20221116-155743.yaml b/.changes/1.4.0/Docs-20221116-155743.yaml new file mode 100644 index 00000000000..84d90a67b99 --- /dev/null +++ b/.changes/1.4.0/Docs-20221116-155743.yaml @@ -0,0 +1,6 @@ +kind: Docs +body: Fix rendering of sample code for metrics +time: 2022-11-16T15:57:43.204201+01:00 +custom: + Author: jtcohen6 + Issue: "323" diff --git a/.changes/1.4.0/Docs-20221202-150523.yaml b/.changes/1.4.0/Docs-20221202-150523.yaml new file mode 100644 index 00000000000..b08a32cddf6 --- /dev/null +++ b/.changes/1.4.0/Docs-20221202-150523.yaml @@ -0,0 +1,6 @@ +kind: Docs +body: Alphabetize `core/dbt/README.md` +time: 2022-12-02T15:05:23.695333-07:00 +custom: + Author: dbeatty10 + Issue: "6368" diff --git a/.changes/1.4.0/Features-20220408-165459.yaml b/.changes/1.4.0/Features-20220408-165459.yaml new file mode 100644 index 00000000000..12cdf74c757 --- /dev/null +++ b/.changes/1.4.0/Features-20220408-165459.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Added favor-state flag to optionally favor state nodes even if unselected node + exists +time: 2022-04-08T16:54:59.696564+01:00 +custom: + Author: daniel-murray josephberni + Issue: "2968" diff --git a/.changes/1.4.0/Features-20220817-154857.yaml b/.changes/1.4.0/Features-20220817-154857.yaml new file mode 100644 index 00000000000..ad53df05a3f --- /dev/null +++ b/.changes/1.4.0/Features-20220817-154857.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. +time: 2022-08-17T15:48:57.225267-04:00 +custom: + Author: gshank + Issue: "5610" diff --git a/.changes/1.4.0/Features-20220823-085727.yaml b/.changes/1.4.0/Features-20220823-085727.yaml new file mode 100644 index 00000000000..4d8daebbf5e --- /dev/null +++ b/.changes/1.4.0/Features-20220823-085727.yaml @@ -0,0 +1,7 @@ +kind: Features +body: incremental predicates +time: 2022-08-23T08:57:27.640804-05:00 +custom: + Author: dave-connors-3 + Issue: "5680" + PR: "5702" diff --git a/.changes/unreleased/Features-20220912-125935.yaml b/.changes/1.4.0/Features-20220912-125935.yaml similarity index 92% rename from .changes/unreleased/Features-20220912-125935.yaml rename to .changes/1.4.0/Features-20220912-125935.yaml index b0c1dd41a26..d49f35fd0af 100644 --- a/.changes/unreleased/Features-20220912-125935.yaml +++ b/.changes/1.4.0/Features-20220912-125935.yaml @@ -4,4 +4,3 @@ time: 2022-09-12T12:59:35.121188+01:00 custom: Author: jared-rimmer Issue: "5486" - PR: "5812" diff --git a/.changes/unreleased/Features-20220914-095625.yaml b/.changes/1.4.0/Features-20220914-095625.yaml similarity index 93% rename from .changes/unreleased/Features-20220914-095625.yaml rename to .changes/1.4.0/Features-20220914-095625.yaml index 51828084a0d..d46b1bfa8d8 100644 --- a/.changes/unreleased/Features-20220914-095625.yaml +++ b/.changes/1.4.0/Features-20220914-095625.yaml @@ -4,4 +4,3 @@ time: 2022-09-14T09:56:25.97818-07:00 custom: Author: colin-rogers-dbt Issue: "5521" - PR: "5838" diff --git a/.changes/unreleased/Features-20220925-211651.yaml b/.changes/1.4.0/Features-20220925-211651.yaml similarity index 92% rename from .changes/unreleased/Features-20220925-211651.yaml rename to .changes/1.4.0/Features-20220925-211651.yaml index 0f0f6e84213..d2c1911c720 100644 --- a/.changes/unreleased/Features-20220925-211651.yaml +++ b/.changes/1.4.0/Features-20220925-211651.yaml @@ -4,4 +4,3 @@ time: 
2022-09-25T21:16:51.051239654+02:00 custom: Author: pgoslatara Issue: "5929" - PR: "5930" diff --git a/.changes/unreleased/Features-20221003-110705.yaml b/.changes/1.4.0/Features-20221003-110705.yaml similarity index 92% rename from .changes/unreleased/Features-20221003-110705.yaml rename to .changes/1.4.0/Features-20221003-110705.yaml index f8142666c3b..637d8be58c6 100644 --- a/.changes/unreleased/Features-20221003-110705.yaml +++ b/.changes/1.4.0/Features-20221003-110705.yaml @@ -4,4 +4,3 @@ time: 2022-10-03T11:07:05.381632-05:00 custom: Author: dave-connors-3 Issue: "5990" - PR: "5991" diff --git a/.changes/1.4.0/Features-20221102-150003.yaml b/.changes/1.4.0/Features-20221102-150003.yaml new file mode 100644 index 00000000000..9d8ba192687 --- /dev/null +++ b/.changes/1.4.0/Features-20221102-150003.yaml @@ -0,0 +1,7 @@ +kind: Features +body: This pulls the profile name from args when constructing a RuntimeConfig in lib.py, + enabling the dbt-server to override the value that's in the dbt_project.yml +time: 2022-11-02T15:00:03.000805-05:00 +custom: + Author: racheldaniel + Issue: "6201" diff --git a/.changes/1.4.0/Features-20221107-105018.yaml b/.changes/1.4.0/Features-20221107-105018.yaml new file mode 100644 index 00000000000..db6a0ab753a --- /dev/null +++ b/.changes/1.4.0/Features-20221107-105018.yaml @@ -0,0 +1,8 @@ +kind: Features +body: Adding tarball install method for packages. Allowing package tarball to be specified + via url in the packages.yaml. +time: 2022-11-07T10:50:18.464545-05:00 +custom: + Author: timle2 + Issue: "4205" + PR: "4689" diff --git a/.changes/1.4.0/Features-20221114-185207.yaml b/.changes/1.4.0/Features-20221114-185207.yaml new file mode 100644 index 00000000000..459bc8ce234 --- /dev/null +++ b/.changes/1.4.0/Features-20221114-185207.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Added an md5 function to the base context +time: 2022-11-14T18:52:07.788593+02:00 +custom: + Author: haritamar + Issue: "6246" diff --git a/.changes/1.4.0/Features-20221130-112913.yaml b/.changes/1.4.0/Features-20221130-112913.yaml new file mode 100644 index 00000000000..64832de2f68 --- /dev/null +++ b/.changes/1.4.0/Features-20221130-112913.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Exposures support metrics in lineage +time: 2022-11-30T11:29:13.256034-05:00 +custom: + Author: michelleark + Issue: "6057" diff --git a/.changes/1.4.0/Features-20221206-150704.yaml b/.changes/1.4.0/Features-20221206-150704.yaml new file mode 100644 index 00000000000..47939ea5a79 --- /dev/null +++ b/.changes/1.4.0/Features-20221206-150704.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Add support for Python 3.11 +time: 2022-12-06T15:07:04.753127+01:00 +custom: + Author: joshuataylor MichelleArk jtcohen6 + Issue: "6147" + PR: "6326" diff --git a/.changes/unreleased/Fixes-20220916-104854.yaml b/.changes/1.4.0/Fixes-20220916-104854.yaml similarity index 92% rename from .changes/unreleased/Fixes-20220916-104854.yaml rename to .changes/1.4.0/Fixes-20220916-104854.yaml index 64e76c43a3f..bd9af0469a7 100644 --- a/.changes/unreleased/Fixes-20220916-104854.yaml +++ b/.changes/1.4.0/Fixes-20220916-104854.yaml @@ -4,4 +4,3 @@ time: 2022-09-16T10:48:54.162273-05:00 custom: Author: emmyoop Issue: "3992" - PR: "5868" diff --git a/.changes/unreleased/Fixes-20221010-113218.yaml b/.changes/1.4.0/Fixes-20221010-113218.yaml similarity index 92% rename from .changes/unreleased/Fixes-20221010-113218.yaml rename to .changes/1.4.0/Fixes-20221010-113218.yaml index 73f128ec5b7..5b73b8d9ccd 100644 --- 
a/.changes/unreleased/Fixes-20221010-113218.yaml +++ b/.changes/1.4.0/Fixes-20221010-113218.yaml @@ -4,4 +4,3 @@ time: 2022-10-10T11:32:18.752322-05:00 custom: Author: emmyoop Issue: "6030" - PR: "6038" diff --git a/.changes/unreleased/Fixes-20221011-160715.yaml b/.changes/1.4.0/Fixes-20221011-160715.yaml similarity index 92% rename from .changes/unreleased/Fixes-20221011-160715.yaml rename to .changes/1.4.0/Fixes-20221011-160715.yaml index 273e1398bdd..936546a5232 100644 --- a/.changes/unreleased/Fixes-20221011-160715.yaml +++ b/.changes/1.4.0/Fixes-20221011-160715.yaml @@ -4,4 +4,3 @@ time: 2022-10-11T16:07:15.464093-04:00 custom: Author: chamini2 Issue: "6041" - PR: "6042" diff --git a/.changes/1.4.0/Fixes-20221016-173742.yaml b/.changes/1.4.0/Fixes-20221016-173742.yaml new file mode 100644 index 00000000000..c7b00dddba8 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221016-173742.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Add functors to ensure event types with str-type attributes are initialized + to spec, even when provided non-str type params. +time: 2022-10-16T17:37:42.846683-07:00 +custom: + Author: versusfacit + Issue: "5436" diff --git a/.changes/1.4.0/Fixes-20221107-095314.yaml b/.changes/1.4.0/Fixes-20221107-095314.yaml new file mode 100644 index 00000000000..99da9c44522 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221107-095314.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Allow hooks to fail without halting execution flow +time: 2022-11-07T09:53:14.340257-06:00 +custom: + Author: ChenyuLInx + Issue: "5625" diff --git a/.changes/1.4.0/Fixes-20221115-081021.yaml b/.changes/1.4.0/Fixes-20221115-081021.yaml new file mode 100644 index 00000000000..40c81fabacb --- /dev/null +++ b/.changes/1.4.0/Fixes-20221115-081021.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Clarify Error Message for how many models are allowed in a Python file +time: 2022-11-15T08:10:21.527884-05:00 +custom: + Author: justbldwn + Issue: "6245" diff --git a/.changes/1.4.0/Fixes-20221124-163419.yaml b/.changes/1.4.0/Fixes-20221124-163419.yaml new file mode 100644 index 00000000000..010a073269a --- /dev/null +++ b/.changes/1.4.0/Fixes-20221124-163419.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: After this, will be possible to use default values for dbt.config.get +time: 2022-11-24T16:34:19.039512764-03:00 +custom: + Author: devmessias + Issue: "6309" + PR: "6317" diff --git a/.changes/1.4.0/Fixes-20221202-164859.yaml b/.changes/1.4.0/Fixes-20221202-164859.yaml new file mode 100644 index 00000000000..6aad4ced192 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221202-164859.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Use full path for writing manifest +time: 2022-12-02T16:48:59.029519-05:00 +custom: + Author: gshank + Issue: "6055" diff --git a/.changes/1.4.0/Fixes-20221213-112620.yaml b/.changes/1.4.0/Fixes-20221213-112620.yaml new file mode 100644 index 00000000000..a2220f9a920 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221213-112620.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: '[CT-1284] Change Python model default materialization to table' +time: 2022-12-13T11:26:20.550017-08:00 +custom: + Author: aranke + Issue: "6345" diff --git a/.changes/1.4.0/Fixes-20221214-155307.yaml b/.changes/1.4.0/Fixes-20221214-155307.yaml new file mode 100644 index 00000000000..cb37e0a809c --- /dev/null +++ b/.changes/1.4.0/Fixes-20221214-155307.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Repair a regression which prevented basic logging before the logging subsystem + is completely configured. 
+time: 2022-12-14T15:53:07.396512-05:00 +custom: + Author: peterallenwebb + Issue: "6434" diff --git a/.changes/unreleased/Under the Hood-20220927-194259.yaml b/.changes/1.4.0/Under the Hood-20220927-194259.yaml similarity index 91% rename from .changes/unreleased/Under the Hood-20220927-194259.yaml rename to .changes/1.4.0/Under the Hood-20220927-194259.yaml index dbd85165e2c..b6cb64b0155 100644 --- a/.changes/unreleased/Under the Hood-20220927-194259.yaml +++ b/.changes/1.4.0/Under the Hood-20220927-194259.yaml @@ -4,4 +4,3 @@ time: 2022-09-27T19:42:59.241433-07:00 custom: Author: max-sixty Issue: "5946" - PR: "5947" diff --git a/.changes/unreleased/Under the Hood-20220929-134406.yaml b/.changes/1.4.0/Under the Hood-20220929-134406.yaml similarity index 93% rename from .changes/unreleased/Under the Hood-20220929-134406.yaml rename to .changes/1.4.0/Under the Hood-20220929-134406.yaml index ce69bdf322a..b0175190747 100644 --- a/.changes/unreleased/Under the Hood-20220929-134406.yaml +++ b/.changes/1.4.0/Under the Hood-20220929-134406.yaml @@ -4,4 +4,3 @@ time: 2022-09-29T13:44:06.275941-04:00 custom: Author: peterallenwebb Issue: "5809" - PR: "5975" diff --git a/.changes/unreleased/Under the Hood-20221005-120310.yaml b/.changes/1.4.0/Under the Hood-20221005-120310.yaml similarity index 92% rename from .changes/unreleased/Under the Hood-20221005-120310.yaml rename to .changes/1.4.0/Under the Hood-20221005-120310.yaml index eb87a14fedc..797be31c319 100644 --- a/.changes/unreleased/Under the Hood-20221005-120310.yaml +++ b/.changes/1.4.0/Under the Hood-20221005-120310.yaml @@ -4,4 +4,3 @@ time: 2022-10-05T12:03:10.061263-07:00 custom: Author: max-sixty Issue: "5983" - PR: "5983" diff --git a/.changes/unreleased/Under the Hood-20221007-094627.yaml b/.changes/1.4.0/Under the Hood-20221007-094627.yaml similarity index 91% rename from .changes/unreleased/Under the Hood-20221007-094627.yaml rename to .changes/1.4.0/Under the Hood-20221007-094627.yaml index 950c20577ed..d3a5da61566 100644 --- a/.changes/unreleased/Under the Hood-20221007-094627.yaml +++ b/.changes/1.4.0/Under the Hood-20221007-094627.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T09:46:27.682872-05:00 custom: Author: emmyoop Issue: "6023" - PR: "6024" diff --git a/.changes/unreleased/Under the Hood-20221007-140044.yaml b/.changes/1.4.0/Under the Hood-20221007-140044.yaml similarity index 91% rename from .changes/unreleased/Under the Hood-20221007-140044.yaml rename to .changes/1.4.0/Under the Hood-20221007-140044.yaml index b41e3f6eb5a..971d5a40ce8 100644 --- a/.changes/unreleased/Under the Hood-20221007-140044.yaml +++ b/.changes/1.4.0/Under the Hood-20221007-140044.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T14:00:44.227644-07:00 custom: Author: max-sixty Issue: "6028" - PR: "5978" diff --git a/.changes/unreleased/Under the Hood-20221013-181912.yaml b/.changes/1.4.0/Under the Hood-20221013-181912.yaml similarity index 93% rename from .changes/unreleased/Under the Hood-20221013-181912.yaml rename to .changes/1.4.0/Under the Hood-20221013-181912.yaml index 2f03b9b29ff..4f5218891b4 100644 --- a/.changes/unreleased/Under the Hood-20221013-181912.yaml +++ b/.changes/1.4.0/Under the Hood-20221013-181912.yaml @@ -4,4 +4,3 @@ time: 2022-10-13T18:19:12.167548-04:00 custom: Author: peterallenwebb Issue: "5229" - PR: "6025" diff --git a/.changes/1.4.0/Under the Hood-20221017-151511.yaml b/.changes/1.4.0/Under the Hood-20221017-151511.yaml new file mode 100644 index 00000000000..94f4d27d6de --- /dev/null +++ b/.changes/1.4.0/Under the 
Hood-20221017-151511.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Fixed extra whitespace in strings introduced by black. +time: 2022-10-17T15:15:11.499246-05:00 +custom: + Author: luke-bassett + Issue: "1350" diff --git a/.changes/1.4.0/Under the Hood-20221017-155844.yaml b/.changes/1.4.0/Under the Hood-20221017-155844.yaml new file mode 100644 index 00000000000..c46ef040410 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221017-155844.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Clean up string formatting +time: 2022-10-17T15:58:44.676549-04:00 +custom: + Author: eve-johns + Issue: "6068" diff --git a/.changes/1.4.0/Under the Hood-20221028-104837.yaml b/.changes/1.4.0/Under the Hood-20221028-104837.yaml new file mode 100644 index 00000000000..446d4898920 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221028-104837.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Remove the 'root_path' field from most nodes +time: 2022-10-28T10:48:37.687886-04:00 +custom: + Author: gshank + Issue: "6171" diff --git a/.changes/1.4.0/Under the Hood-20221028-110344.yaml b/.changes/1.4.0/Under the Hood-20221028-110344.yaml new file mode 100644 index 00000000000..cbe8dacb3d5 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221028-110344.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Combine certain logging events with different levels +time: 2022-10-28T11:03:44.887836-04:00 +custom: + Author: gshank + Issue: "6173" diff --git a/.changes/1.4.0/Under the Hood-20221108-074550.yaml b/.changes/1.4.0/Under the Hood-20221108-074550.yaml new file mode 100644 index 00000000000..a8fbc7e208b --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221108-074550.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Convert threading tests to pytest +time: 2022-11-08T07:45:50.589147-06:00 +custom: + Author: stu-k + Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221108-115633.yaml b/.changes/1.4.0/Under the Hood-20221108-115633.yaml new file mode 100644 index 00000000000..ea073719cda --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221108-115633.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Convert postgres index tests to pytest +time: 2022-11-08T11:56:33.743042-06:00 +custom: + Author: stu-k + Issue: "5770" diff --git a/.changes/1.4.0/Under the Hood-20221108-133104.yaml b/.changes/1.4.0/Under the Hood-20221108-133104.yaml new file mode 100644 index 00000000000..6829dc097eb --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221108-133104.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Convert use color tests to pytest +time: 2022-11-08T13:31:04.788547-06:00 +custom: + Author: stu-k + Issue: "5771" diff --git a/.changes/1.4.0/Under the Hood-20221116-130037.yaml b/.changes/1.4.0/Under the Hood-20221116-130037.yaml new file mode 100644 index 00000000000..ecdedd6bd2d --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221116-130037.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Add github actions workflow to generate high level CLI API docs +time: 2022-11-16T13:00:37.916202-06:00 +custom: + Author: stu-k + Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221118-145717.yaml b/.changes/1.4.0/Under the Hood-20221118-145717.yaml new file mode 100644 index 00000000000..934cd9dd5cb --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221118-145717.yaml @@ -0,0 +1,8 @@ +kind: Under the Hood +body: Functionality-neutral refactor of event logging system to improve encapsulation + and modularity. 
+time: 2022-11-18T14:57:17.792622-05:00 +custom: + Author: peterallenwebb + Issue: "6139" + PR: "6291" diff --git a/.changes/1.4.0/Under the Hood-20221205-164948.yaml b/.changes/1.4.0/Under the Hood-20221205-164948.yaml new file mode 100644 index 00000000000..579f973955b --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221205-164948.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Consolidate ParsedNode and CompiledNode classes +time: 2022-12-05T16:49:48.563583-05:00 +custom: + Author: gshank + Issue: "6383" + PR: "6384" diff --git a/.changes/1.4.0/Under the Hood-20221206-094015.yaml b/.changes/1.4.0/Under the Hood-20221206-094015.yaml new file mode 100644 index 00000000000..ebcb9999430 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221206-094015.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Prevent doc gen workflow from running on forks +time: 2022-12-06T09:40:15.301984-06:00 +custom: + Author: stu-k + Issue: "6386" + PR: "6390" diff --git a/.changes/1.4.0/Under the Hood-20221206-113053.yaml b/.changes/1.4.0/Under the Hood-20221206-113053.yaml new file mode 100644 index 00000000000..a1f94f68f43 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221206-113053.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Fix intermittent database connection failure in Windows CI test +time: 2022-12-06T11:30:53.166009-07:00 +custom: + Author: MichelleArk dbeatty10 + Issue: "6394" + PR: "6395" diff --git a/.changes/1.4.0/Under the Hood-20221211-214240.yaml b/.changes/1.4.0/Under the Hood-20221211-214240.yaml new file mode 100644 index 00000000000..adeaefba257 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221211-214240.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Refactor and clean up manifest nodes +time: 2022-12-11T21:42:40.560074-05:00 +custom: + Author: gshank + Issue: "6426" + PR: "6427" diff --git a/.changes/1.4.0/Under the Hood-20221213-214106.yaml b/.changes/1.4.0/Under the Hood-20221213-214106.yaml new file mode 100644 index 00000000000..708c84661d6 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221213-214106.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Restore important legacy logging behaviors, following refactor which removed + them +time: 2022-12-13T21:41:06.815133-05:00 +custom: + Author: peterallenwebb + Issue: "6437" diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml new file mode 100644 index 00000000000..be840b20a99 --- /dev/null +++ b/.changes/unreleased/Breaking Changes-20221205-141937.yaml @@ -0,0 +1,9 @@ +kind: Breaking Changes +body: Cleaned up exceptions to directly raise in code. Removed use of all exception + functions in the code base and marked them all as deprecated to be removed next + minor release. 
+time: 2022-12-05T14:19:37.863032-06:00
+custom:
+  Author: emmyoop
+  Issue: "6339"
+  PR: "6347"
diff --git a/.changes/unreleased/Features-20220817-154857.yaml b/.changes/unreleased/Features-20220817-154857.yaml
deleted file mode 100644
index c8c0cd9c036..00000000000
--- a/.changes/unreleased/Features-20220817-154857.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-kind: Features
-body: Proto logging messages
-time: 2022-08-17T15:48:57.225267-04:00
-custom:
-  Author: gshank
-  Issue: "5610"
-  PR: "5643"
diff --git a/.changes/unreleased/Fixes-20221117-220320.yaml b/.changes/unreleased/Fixes-20221117-220320.yaml
new file mode 100644
index 00000000000..2f71fe213fc
--- /dev/null
+++ b/.changes/unreleased/Fixes-20221117-220320.yaml
@@ -0,0 +1,7 @@
+kind: Fixes
+body: Fix typo in util.py
+time: 2022-11-17T22:03:20.4836855+09:00
+custom:
+  Author: eltociear
+  Issue: "4904"
+  PR: "6037"
diff --git a/.changes/unreleased/Fixes-20221213-113915.yaml b/.changes/unreleased/Fixes-20221213-113915.yaml
new file mode 100644
index 00000000000..b92a2d6cbc9
--- /dev/null
+++ b/.changes/unreleased/Fixes-20221213-113915.yaml
@@ -0,0 +1,6 @@
+kind: Fixes
+body: '[CT-1591] Don''t parse empty Python files'
+time: 2022-12-13T11:39:15.818464-08:00
+custom:
+  Author: aranke
+  Issue: "6345"
diff --git a/.changes/unreleased/Under the Hood-20221219-193435.yaml b/.changes/unreleased/Under the Hood-20221219-193435.yaml
new file mode 100644
index 00000000000..82388dbb759
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20221219-193435.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Treat dense text blobs as binary for `git grep`
+time: 2022-12-19T19:34:35.890275-07:00
+custom:
+  Author: dbeatty10
+  Issue: "6294"
diff --git a/.changie.yaml b/.changie.yaml
index 0744c5bb9c7..e417244506b 100644
--- a/.changie.yaml
+++ b/.changie.yaml
@@ -6,19 +6,67 @@ changelogPath: CHANGELOG.md
 versionExt: md
 versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}'
 kindFormat: '### {{.Kind}}'
-changeFormat: '- {{.Body}} ([#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), [#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))'
+changeFormat: |-
+  {{- $IssueList := list }}
+  {{- $changes := splitList " " $.Custom.Issue }}
+  {{- range $issueNbr := $changes }}
+  {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
+  {{- $IssueList = append $IssueList $changeLink }}
+  {{- end -}}
+  - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
 kinds:
 - label: Breaking Changes
 - label: Features
 - label: Fixes
 - label: Docs
-  changeFormat: '- {{.Body}} ([dbt-docs/#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-docs/issues/{{.Custom.Issue}}), [dbt-docs/#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-docs/pull/{{.Custom.PR}}))'
+  changeFormat: |-
+    {{- $IssueList := list }}
+    {{- $changes := splitList " " $.Custom.Issue }}
+    {{- range $issueNbr := $changes }}
+    {{- $changeLink := "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $issueNbr }}
+    {{- $IssueList = append $IssueList $changeLink }}
+    {{- end -}}
+    - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
 - label: Under the Hood
 - label: Dependencies
-  changeFormat: '- {{.Body}} ({{if ne .Custom.Issue ""}}[#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), {{end}}[#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))'
+  changeFormat: |-
+    {{- $PRList := list }}
+    {{- $changes := splitList " " $.Custom.PR }}
+    {{- range $pullrequest := $changes }}
+    {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
+    {{- $PRList = append $PRList $changeLink }}
+    {{- end -}}
+    - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
+  skipGlobalChoices: true
+  additionalChoices:
+  - key: Author
+    label: GitHub Username(s) (separated by a single space if multiple)
+    type: string
+    minLength: 3
+  - key: PR
+    label: GitHub Pull Request Number (separated by a single space if multiple)
+    type: string
+    minLength: 1
 - label: Security
-  changeFormat: '- {{.Body}} ({{if ne .Custom.Issue ""}}[#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), {{end}}[#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))'
+  changeFormat: |-
+    {{- $PRList := list }}
+    {{- $changes := splitList " " $.Custom.PR }}
+    {{- range $pullrequest := $changes }}
+    {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
+    {{- $PRList = append $PRList $changeLink }}
+    {{- end -}}
+    - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
+  skipGlobalChoices: true
+  additionalChoices:
+  - key: Author
+    label: GitHub Username(s) (separated by a single space if multiple)
+    type: string
+    minLength: 3
+  - key: PR
+    label: GitHub Pull Request Number (separated by a single space if multiple)
+    type: string
+    minLength: 1
 newlines:
   afterChangelogHeader: 1
@@ -33,42 +81,41 @@ custom:
   type: string
   minLength: 3
 - key: Issue
-  label: GitHub Issue Number
-  type: int
-  minInt: 1
-- key: PR
-  label: GitHub Pull Request Number
-  type: int
-  minInt: 1
+  label: GitHub Issue Number (separated by a single space if multiple)
+  type: string
+  minLength: 1
 footerFormat: |
   {{- $contributorDict := dict }}
   {{- /* any names added to this list should be all lowercase for later matching purposes */}}
-  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
+  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
   {{- range $change := .Changes }}
   {{- $authorList := splitList " " $change.Custom.Author }}
-  {{- /* loop through all authors for a PR */}}
+  {{- /* loop through all authors for a single changelog */}}
   {{- range $author := $authorList }}
   {{- $authorLower := lower $author }}
   {{- /* we only want to include non-core team contributors */}}
   {{- if not (has $authorLower $core_team)}}
-  {{- /* Docs kind link back to dbt-docs instead of dbt-core PRs */}}
-  {{- $prLink := $change.Kind }}
-  {{- if eq $change.Kind "Docs" }}
-  {{- $prLink = "[dbt-docs/#pr](https://github.com/dbt-labs/dbt-docs/pull/pr)" | replace "pr" $change.Custom.PR }}
-  {{- else }}
-  {{- $prLink = "[#pr](https://github.com/dbt-labs/dbt-core/pull/pr)" | replace "pr" $change.Custom.PR }}
-  {{- end }}
-  {{- /* check if this contributor has other PRs associated with them already */}}
-  {{- if hasKey $contributorDict $author }}
-  {{- $prList := get $contributorDict $author }}
- {{- $prList = append $prList $prLink }}
- {{- $contributorDict := set $contributorDict $author $prList }}
- {{- else }}
- {{- $prList := list $prLink }}
- {{- $contributorDict := set $contributorDict $author $prList }}
- {{- end }}
- {{- end}}
+ {{- $changeList := splitList " " $change.Custom.Author }}
+ {{- /* Docs kind link back to dbt-docs instead of dbt-core issues */}}
+ {{- $changeLink := $change.Kind }}
+ {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
+ {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $change.Custom.PR }}
+ {{- else if eq $change.Kind "Docs"}}
+ {{- $changeLink = "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
+ {{- else }}
+ {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
+ {{- end }}
+ {{- /* check if this contributor has other changes associated with them already */}}
+ {{- if hasKey $contributorDict $author }}
+ {{- $contributionList := get $contributorDict $author }}
+ {{- $contributionList = append $contributionList $changeLink }}
+ {{- $contributorDict := set $contributorDict $author $contributionList }}
+ {{- else }}
+ {{- $contributionList := list $changeLink }}
+ {{- $contributorDict := set $contributorDict $author $contributionList }}
+ {{- end }}
+ {{- end}}
 {{- end}}
 {{- end }}
 {{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}}
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..ff6cbc4608f
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+core/dbt/include/index.html binary
+tests/functional/artifacts/data/state/*/manifest.json binary
diff --git a/.github/workflows/bot-changelog.yml b/.github/workflows/bot-changelog.yml
index 2d06fafe682..c6d2a1507a3 100644
--- a/.github/workflows/bot-changelog.yml
+++ b/.github/workflows/bot-changelog.yml
@@ -40,7 +40,7 @@ jobs:
       matrix:
         include:
           - label: "dependencies"
-            changie_kind: "Dependency"
+            changie_kind: "Dependencies"
           - label: "snyk"
             changie_kind: "Security"
     runs-on: ubuntu-latest
@@ -58,4 +58,4 @@ jobs:
       commit_message: "Add automated changelog yaml from template for bot PR"
       changie_kind: ${{ matrix.changie_kind }}
      label: ${{ matrix.label }}
-      custom_changelog_string: "custom:\n  Author: ${{ github.event.pull_request.user.login }}\n  Issue: 4904\n  PR: ${{ github.event.pull_request.number }}"
+      custom_changelog_string: "custom:\n  Author: ${{ github.event.pull_request.user.login }}\n  PR: ${{ github.event.pull_request.number }}"
diff --git a/.github/workflows/generate-cli-api-docs.yml b/.github/workflows/generate-cli-api-docs.yml
new file mode 100644
index 00000000000..bc079499b83
--- /dev/null
+++ b/.github/workflows/generate-cli-api-docs.yml
@@ -0,0 +1,165 @@
+# **what?**
+# On push, if anything in core/dbt/docs or core/dbt/cli has been
+# created or modified, regenerate the CLI API docs using sphinx.
+
+# **why?**
+# We watch for changes in core/dbt/cli because the CLI API docs rely on click
+# and all supporting flags/params to be generated. We watch for changes in
+# core/dbt/docs since any changes to sphinx configuration or any of the
+# .rst files there could result in a differently built final index.html file.
+
+# **when?**
+# Whenever a change has been pushed to a branch, and only if there is a diff
+# between the PR branch and main's core/dbt/cli and/or core/dbt/docs dirs.
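The reworked `footerFormat` template above is dense Go templating, so here is a minimal Python sketch of the same grouping logic it performs: one issue link per changelog entry, accumulated under each non-core contributor. The sample entries and trimmed core-team set are illustrative only, and the Dependencies/Security branch that links PRs instead of issues is omitted for brevity; this is a reading aid, not code from this patch.

```python
# Illustrative sketch of the footerFormat grouping; not part of dbt-core.
CORE_TEAM = {"gshank", "emmyoop", "iknox-fa"}  # lowercased for matching

changes = [  # sample data only
    {"kind": "Fixes", "author": "eltociear", "issue": "4904"},
    {"kind": "Docs", "author": "eltociear", "issue": "323"},
]

contributors: dict = {}
for change in changes:
    # an entry may list several space-separated authors
    for author in change["author"].split(" "):
        if author.lower() in CORE_TEAM:
            continue  # only non-core contributors appear in the footer
        if change["kind"] == "Docs":
            link = f"[dbt-docs/#{change['issue']}](https://github.com/dbt-labs/dbt-docs/issues/{change['issue']})"
        else:
            link = f"[#{change['issue']}](https://github.com/dbt-labs/dbt-core/issues/{change['issue']})"
        contributors.setdefault(author, []).append(link)

for author, links in sorted(contributors.items()):
    print(f"- [@{author}](https://github.com/{author}) ({', '.join(links)})")
```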
+ +# TODO: add bot comment to PR informing contributor that the docs have been committed +# TODO: figure out why github action triggered pushes cause github to fail to report +# the status of jobs + +name: Generate CLI API docs + +on: + pull_request: + +permissions: + contents: write + pull-requests: write + +env: + CLI_DIR: ${{ github.workspace }}/core/dbt/cli + DOCS_DIR: ${{ github.workspace }}/core/dbt/docs + DOCS_BUILD_DIR: ${{ github.workspace }}/core/dbt/docs/build + +jobs: + check_gen: + name: check if generation needed + runs-on: ubuntu-latest + if: ${{ github.event.pull_request.head.repo.fork == false }} + outputs: + cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }} + docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }} + + steps: + - name: "[DEBUG] print variables" + run: | + echo "env.CLI_DIR: ${{ env.CLI_DIR }}" + echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}" + echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" + + - name: git checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.head_ref }} + + - name: set shas + id: set_shas + run: | + THIS_SHA=$(git rev-parse @) + LAST_SHA=$(git rev-parse @~1) + + echo "this sha: $THIS_SHA" + echo "last sha: $LAST_SHA" + + echo "this_sha=$THIS_SHA" >> $GITHUB_OUTPUT + echo "last_sha=$LAST_SHA" >> $GITHUB_OUTPUT + + - name: check for changes in core/dbt/cli + id: check_cli + run: | + CLI_DIR_CHANGES=$(git diff \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.CLI_DIR }}) + + if [ -n "$CLI_DIR_CHANGES" ]; then + echo "changes found" + echo $CLI_DIR_CHANGES + echo "cli_dir_changed=true" >> $GITHUB_OUTPUT + exit 0 + fi + echo "cli_dir_changed=false" >> $GITHUB_OUTPUT + echo "no changes found" + + - name: check for changes in core/dbt/docs + id: check_docs + if: steps.check_cli.outputs.cli_dir_changed == 'false' + run: | + DOCS_DIR_CHANGES=$(git diff --name-only \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.DOCS_DIR }} ':!${{ env.DOCS_BUILD_DIR }}') + + DOCS_BUILD_DIR_CHANGES=$(git diff --name-only \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.DOCS_BUILD_DIR }}) + + if [ -n "$DOCS_DIR_CHANGES" ] && [ -z "$DOCS_BUILD_DIR_CHANGES" ]; then + echo "changes found" + echo $DOCS_DIR_CHANGES + echo "docs_dir_changed=true" >> $GITHUB_OUTPUT + exit 0 + fi + echo "docs_dir_changed=false" >> $GITHUB_OUTPUT + echo "no changes found" + + gen_docs: + name: generate docs + runs-on: ubuntu-latest + needs: [check_gen] + if: | + needs.check_gen.outputs.cli_dir_changed == 'true' + || needs.check_gen.outputs.docs_dir_changed == 'true' + + steps: + - name: "[DEBUG] print variables" + run: | + echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" + echo "github head_ref: ${{ github.head_ref }}" + + - name: git checkout + uses: actions/checkout@v3 + with: + ref: ${{ github.head_ref }} + + - name: install python + uses: actions/setup-python@v4.3.0 + with: + python-version: 3.8 + + - name: install dev requirements + run: | + python3 -m venv env + source env/bin/activate + python -m pip install --upgrade pip + pip install -r requirements.txt -r dev-requirements.txt + + - name: generate docs + run: | + source env/bin/activate + cd ${{ env.DOCS_DIR }} + + echo "cleaning existing docs" + make clean + + echo "creating docs" + make html + + - name: debug + run: | + echo ">>>>> status" + git status + echo ">>>>> remotes" + git remote -v + echo ">>>>> branch" + git branch -v + echo 
">>>>> log" + git log --pretty=oneline | head -5 + + - name: commit docs + run: | + git config user.name 'Github Build Bot' + git config user.email 'buildbot@fishtownanalytics.com' + git commit -am "Add generated CLI API docs" + git push -u origin ${{ github.head_ref }} diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 257935419c8..8138b730d34 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -73,7 +73,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] env: TOXENV: "unit" @@ -118,7 +118,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] os: [ubuntu-20.04] include: - python-version: 3.8 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index f09533b8b36..d902340a91b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,13 +9,4 @@ permissions: jobs: stale: - runs-on: ubuntu-latest - steps: - # pinned at v4 (https://github.com/actions/stale/releases/tag/v4.0.0) - - uses: actions/stale@cdf15f641adb27a71842045a94023bef6945e3aa - with: - stale-issue-message: "This issue has been marked as Stale because it has been open for 180 days with no activity. If you would like the issue to remain open, please remove the stale label or comment on the issue, or it will be closed in 7 days." - stale-pr-message: "This PR has been marked as Stale because it has been open for 180 days with no activity. If you would like the PR to remain open, please remove the stale label or comment on the PR, or it will be closed in 7 days." - close-issue-message: "Although we are closing this issue as stale, it's not gone forever. Issues can be reopened if there is renewed community interest; add a comment to notify the maintainers." - # mark issues/PRs stale when they haven't seen activity in 180 days - days-before-stale: 180 + uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main diff --git a/.gitignore b/.gitignore index ac91d49c9c4..dc9996305d3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ __pycache__/ env*/ dbt_env/ build/ +!core/dbt/docs/build develop-eggs/ dist/ downloads/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6877497ae37..ce9847cf454 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # Eventually the hooks described here will be run as tests before merging each PR. # TODO: remove global exclusion of tests when testing overhaul is complete -exclude: ^test/ +exclude: ^(test/|core/dbt/docs/build/) # Force all unspecified python hooks to run python 3.8 default_language_version: diff --git a/CHANGELOG.md b/CHANGELOG.md index 039de921800..4a91696f68b 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,96 @@ - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). 
For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
+## dbt-core 1.4.0-b1 - December 15, 2022
+
+### Features
+
+- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
+- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610))
+- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486))
+- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521))
+- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929))
+- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990))
+- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201))
+- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205))
+- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246))
+- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057))
+- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147))
+- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680))
+
+### Fixes
+
+- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992))
+- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030))
+- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041))
+- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436))
+- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625))
+- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245))
+- After this, it will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309))
+- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055))
+- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345))
+- Repair a regression which prevented basic logging before the logging subsystem is completely configured. 
([#6434](https://github.com/dbt-labs/dbt-core/issues/6434))
+
+### Docs
+
+- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791))
+- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528))
+- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880))
+- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323))
+- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368))
+
+### Under the Hood
+
+- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946))
+- Added flat_graph attribute to the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809))
+- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983))
+- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023))
+- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028))
+- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229))
+- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068))
+- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350))
+- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171))
+- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173))
+- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942))
+- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770))
+- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771))
+- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942))
+- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. 
([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) +- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) +- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) +- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) +- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) + +### Dependencies + +- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) +- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) +- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) +- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) +- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) + +### Contributors +- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- [@dave-connors-3](https://github.com/dave-connors-3) 
([#5680](https://github.com/dbt-labs/dbt-core/issues/5680))
+
 ## Previous Releases
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index efbb0a726ad..3bbd8d14d5f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -56,7 +56,7 @@ There are some tools that will be helpful to you in developing locally. While th
 These are the tools used in `dbt-core` development and testing:

-- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, and 3.10
+- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, 3.10, and 3.11
 - [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests
 - [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
 - [`black`](https://github.com/psf/black) for code formatting
@@ -160,7 +160,7 @@ suites.

 #### `tox`

-[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and install dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, and Python 3.10 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests in located in `tox.ini`.
+[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, Python 3.10, and Python 3.11 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests is located in `tox.ini`.

 #### `pytest`

@@ -201,13 +201,21 @@ Here are some general rules for adding tests:
 * Sometimes flake8 complains about lines that are actually fine, in which case you can put a comment on the line such as: # noqa or # noqa: ANNN, where ANNN is the error code that flake8 issues.
 * To collect output for `CProfile`, run dbt with the `-r` option and the name of an output file, i.e. `dbt -r dbt.cprof run`. If you just want to profile parsing, you can do: `dbt -r dbt.cprof parse`. `pip` install `snakeviz` to view the output. Run `snakeviz dbt.cprof` and output will be rendered in a browser window.

-## Adding a CHANGELOG Entry
+## Adding or modifying a CHANGELOG Entry
 We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.

 Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system.

-Once changie is installed and your PR is created, simply run `changie new` and changie will walk you through the process of creating a changelog entry. Commit the file that's created and your changelog entry is complete!
+Once changie is installed and your PR is created for a new feature, simply run the following command and changie will walk you through the process of creating a changelog entry:
+
+```shell
+changie new
+```
+
+Commit the file that's created and your changelog entry is complete!
+
+If you are contributing to a feature already in progress, you will modify the changie yaml file in dbt/.changes/unreleased/ related to your change. If you need help finding this file, please ask within the discussion for the pull request! 
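If you want to locate that yaml file yourself first, a small hypothetical helper along these lines can narrow it down; the `.changes/unreleased` path and placeholder username are assumptions to adapt to your own checkout, and this snippet is not part of dbt-core:

```python
# Hypothetical helper: print unreleased changie entries that mention a
# given GitHub username, so you can find the one to edit.
from pathlib import Path

username = "your-github-username"  # assumption: replace with your handle
for entry in sorted(Path(".changes/unreleased").glob("*.yaml")):
    if username in entry.read_text():
        print(entry)
```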
You don't need to worry about which `dbt-core` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-core`. The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-core`. diff --git a/Dockerfile.test b/Dockerfile.test index eb6ba824bcb..b5a373270dd 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -49,6 +49,9 @@ RUN apt-get update \ python3.10 \ python3.10-dev \ python3.10-venv \ + python3.11 \ + python3.11-dev \ + python3.11-venv \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/core/dbt/README.md b/core/dbt/README.md index 5886bf37525..79123a95f47 100644 --- a/core/dbt/README.md +++ b/core/dbt/README.md @@ -2,50 +2,59 @@ ## The following are individual files in this directory. -### deprecations.py - -### flags.py +### compilation.py -### main.py +### constants.py -### tracking.py +### dataclass_schema.py -### version.py +### deprecations.py -### lib.py +### exceptions.py -### node_types.py +### flags.py ### helper_types.py +### hooks.py + +### lib.py + ### links.py -### semver.py +### logger.py -### ui.py +### main.py -### compilation.py +### node_types.py -### dataclass_schema.py +### profiler.py -### exceptions.py +### selected_resources.py -### hooks.py +### semver.py -### logger.py +### tracking.py -### profiler.py +### ui.py ### utils.py +### version.py + ## The subdirectories will be documented in a README in the subdirectory -* config -* include * adapters +* cli +* clients +* config * context +* contracts * deps +* docs +* events * graph +* include +* parser * task -* clients -* events +* tests diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index 5fd3769aa74..577cdf6d9a6 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -41,13 +41,14 @@ from dbt.events.types import ( NewConnection, ConnectionReused, + ConnectionLeftOpenInCleanup, ConnectionLeftOpen, - ConnectionLeftOpen2, + ConnectionClosedInCleanup, ConnectionClosed, - ConnectionClosed2, Rollback, RollbackFailed, ) +from dbt.events.contextvars import get_node_info from dbt import flags from dbt.utils import cast_to_str @@ -169,7 +170,9 @@ def set_connection_name(self, name: Optional[str] = None) -> Connection: if conn.name == conn_name and conn.state == "open": return conn - fire_event(NewConnection(conn_name=conn_name, conn_type=self.TYPE)) + fire_event( + NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) + ) if conn.state == "open": fire_event(ConnectionReused(conn_name=conn_name)) @@ -306,9 +309,9 @@ def cleanup_all(self) -> None: with self.lock: for connection in self.thread_connections.values(): if connection.state not in {"closed", "init"}: - fire_event(ConnectionLeftOpen(conn_name=cast_to_str(connection.name))) + fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name))) else: - fire_event(ConnectionClosed(conn_name=cast_to_str(connection.name))) + fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name))) self.close(connection) # garbage collect these connections @@ -336,7 +339,9 @@ def _rollback_handle(cls, connection: Connection) -> None: except Exception: fire_event( RollbackFailed( - 
conn_name=cast_to_str(connection.name), exc_info=traceback.format_exc() + conn_name=cast_to_str(connection.name), + exc_info=traceback.format_exc(), + node_info=get_node_info(), ) ) @@ -345,10 +350,16 @@ def _close_handle(cls, connection: Connection) -> None: """Perform the actual close operation.""" # On windows, sometimes connection handles don't have a close() attr. if hasattr(connection.handle, "close"): - fire_event(ConnectionClosed2(conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info()) + ) connection.handle.close() else: - fire_event(ConnectionLeftOpen2(conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionLeftOpen( + conn_name=cast_to_str(connection.name), node_info=get_node_info() + ) + ) @classmethod def _rollback(cls, connection: Connection) -> None: @@ -359,7 +370,7 @@ def _rollback(cls, connection: Connection) -> None: f'"{connection.name}", but it does not have one open!' ) - fire_event(Rollback(conn_name=cast_to_str(connection.name))) + fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info())) cls._rollback_handle(connection) connection.transaction_open = False @@ -371,7 +382,7 @@ def close(cls, connection: Connection) -> Connection: return connection if connection.transaction_open and connection.handle: - fire_event(Rollback(conn_name=cast_to_str(connection.name))) + fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info())) cls._rollback_handle(connection) connection.transaction_open = False diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 3c301c2e7f4..64ebbeac5dd 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -15,7 +15,6 @@ List, Mapping, Iterator, - Union, Set, ) @@ -23,13 +22,20 @@ import pytz from dbt.exceptions import ( - raise_database_error, - raise_compiler_error, - invalid_type_error, - get_relation_returned_multiple_results, InternalException, + InvalidMacroArgType, + InvalidMacroResult, + InvalidQuoteConfigType, NotImplementedException, + NullRelationCacheAttempted, + NullRelationDropAttempted, + RelationReturnedMultipleResults, + RenameToNoneAttempted, RuntimeException, + SnapshotTargetIncomplete, + SnapshotTargetNotSnapshotTable, + UnexpectedNull, + UnexpectedNonTimestamp, ) from dbt.adapters.protocol import ( @@ -38,16 +44,15 @@ ) from dbt.clients.agate_helper import empty_table, merge_tables, table_from_rows from dbt.clients.jinja import MacroGenerator -from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode from dbt.contracts.graph.manifest import Manifest, MacroManifest -from dbt.contracts.graph.parsed import ParsedSeedNode -from dbt.exceptions import warn_or_error -from dbt.events.functions import fire_event +from dbt.contracts.graph.nodes import ResultNode +from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( CacheMiss, ListRelations, CodeExecution, CodeExecutionStatus, + CatalogGenerationError, ) from dbt.utils import filter_null_values, executor, cast_to_str @@ -64,9 +69,6 @@ from dbt.adapters.cache import RelationsCache, _make_ref_key_msg -SeedModel = Union[ParsedSeedNode, CompiledSeedNode] - - GET_CATALOG_MACRO_NAME = "get_catalog" FRESHNESS_MACRO_NAME = "collect_freshness" @@ -102,18 +104,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet assume the datetime is already for UTC and add the timezone. 
""" if dt is None: - raise raise_database_error( - "Expected a non-null value when querying field '{}' of table " - " {} but received value 'null' instead".format(field_name, source) - ) + raise UnexpectedNull(field_name, source) elif not hasattr(dt, "tzinfo"): - raise raise_database_error( - "Expected a timestamp value when querying field '{}' of table " - "{} but received value of type '{}' instead".format( - field_name, source, type(dt).__name__ - ) - ) + raise UnexpectedNonTimestamp(field_name, source, dt) elif dt.tzinfo: return dt.astimezone(pytz.UTC) @@ -243,9 +237,7 @@ def nice_connection_name(self) -> str: return conn.name @contextmanager - def connection_named( - self, name: str, node: Optional[CompileResultNode] = None - ) -> Iterator[None]: + def connection_named(self, name: str, node: Optional[ResultNode] = None) -> Iterator[None]: try: if self.connections.query_header is not None: self.connections.query_header.set(name, node) @@ -257,7 +249,7 @@ def connection_named( self.connections.query_header.reset() @contextmanager - def connection_for(self, node: CompileResultNode) -> Iterator[None]: + def connection_for(self, node: ResultNode) -> Iterator[None]: with self.connection_named(node.unique_id, node): yield @@ -372,7 +364,7 @@ def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap: lowercase strings. """ info_schema_name_map = SchemaSearchMap() - nodes: Iterator[CompileResultNode] = chain( + nodes: Iterator[ResultNode] = chain( [ node for node in manifest.nodes.values() @@ -441,7 +433,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str: """Cache a new relation in dbt. It will show up in `list relations`.""" if relation is None: name = self.nice_connection_name() - raise_compiler_error("Attempted to cache a null relation for {}".format(name)) + raise NullRelationCacheAttempted(name) self.cache.add(relation) # so jinja doesn't render things return "" @@ -453,7 +445,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str: """ if relation is None: name = self.nice_connection_name() - raise_compiler_error("Attempted to drop a null relation for {}".format(name)) + raise NullRelationDropAttempted(name) self.cache.drop(relation) return "" @@ -470,9 +462,7 @@ def cache_renamed( name = self.nice_connection_name() src_name = _relation_name(from_relation) dst_name = _relation_name(to_relation) - raise_compiler_error( - "Attempted to rename {} to {} for {}".format(src_name, dst_name, name) - ) + raise RenameToNoneAttempted(src_name, dst_name, name) self.cache.rename(from_relation, to_relation) return "" @@ -622,7 +612,7 @@ def get_missing_columns( to_relation. """ if not isinstance(from_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="get_missing_columns", arg_name="from_relation", got_value=from_relation, @@ -630,7 +620,7 @@ def get_missing_columns( ) if not isinstance(to_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="get_missing_columns", arg_name="to_relation", got_value=to_relation, @@ -655,7 +645,7 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: incorrect. 
""" if not isinstance(relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="valid_snapshot_target", arg_name="relation", got_value=relation, @@ -676,24 +666,16 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: if missing: if extra: - msg = ( - 'Snapshot target has ("{}") but not ("{}") - is it an ' - "unmigrated previous version archive?".format( - '", "'.join(extra), '", "'.join(missing) - ) - ) + raise SnapshotTargetIncomplete(extra, missing) else: - msg = 'Snapshot target is not a snapshot table (missing "{}")'.format( - '", "'.join(missing) - ) - raise_compiler_error(msg) + raise SnapshotTargetNotSnapshotTable(missing) @available.parse_none def expand_target_column_types( self, from_relation: BaseRelation, to_relation: BaseRelation ) -> None: if not isinstance(from_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="expand_target_column_types", arg_name="from_relation", got_value=from_relation, @@ -701,7 +683,7 @@ def expand_target_column_types( ) if not isinstance(to_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="expand_target_column_types", arg_name="to_relation", got_value=to_relation, @@ -783,7 +765,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[ "schema": schema, "database": database, } - get_relation_returned_multiple_results(kwargs, matches) + raise RelationReturnedMultipleResults(kwargs, matches) elif matches: return matches[0] @@ -847,10 +829,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str: elif quote_config is None: pass else: - raise_compiler_error( - f'The seed configuration value of "quote_columns" has an ' - f"invalid type {type(quote_config)}" - ) + raise InvalidQuoteConfigType(quote_config) if quote_columns: return self.quote(column) @@ -1100,11 +1079,7 @@ def calculate_freshness( # now we have a 1-row table of the maximum `loaded_at_field` value and # the current time according to the db. if len(table) != 1 or len(table[0]) != 2: - raise_compiler_error( - 'Got an invalid result from "{}" macro: {}'.format( - FRESHNESS_MACRO_NAME, [tuple(r) for r in table] - ) - ) + raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table) if table[0][0] is None: # no records in the table, so really the max_loaded_at was # infinitely long ago. 
Just call it 0:00 January 1 year UTC @@ -1327,7 +1302,7 @@ def catch_as_completed( elif isinstance(exc, KeyboardInterrupt) or not isinstance(exc, Exception): raise exc else: - warn_or_error(f"Encountered an error while generating catalog: {str(exc)}") + warn_or_error(CatalogGenerationError(exc=str(exc))) # exc is not None, derives from Exception, and isn't ctrl+c exceptions.append(exc) return merge_tables(tables), exceptions diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py index 26f34be9c93..dd88fdb2d41 100644 --- a/core/dbt/adapters/base/query_headers.py +++ b/core/dbt/adapters/base/query_headers.py @@ -5,7 +5,7 @@ from dbt.context.manifest import generate_query_header_context from dbt.contracts.connection import AdapterRequiredConfig, QueryComment -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.exceptions import RuntimeException @@ -90,7 +90,7 @@ def add(self, sql: str) -> str: def reset(self): self.set("master", None) - def set(self, name: str, node: Optional[CompileResultNode]): + def set(self, name: str, node: Optional[ResultNode]): wrapped: Optional[NodeWrapper] = None if node is not None: wrapped = NodeWrapper(node) diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 3124384975a..5bc0c56b264 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -1,9 +1,8 @@ from collections.abc import Hashable -from dataclasses import dataclass -from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set +from dataclasses import dataclass, field +from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set -from dbt.contracts.graph.compiled import CompiledNode -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode +from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode from dbt.contracts.relation import ( RelationType, ComponentName, @@ -12,7 +11,7 @@ Policy, Path, ) -from dbt.exceptions import InternalException +from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed from dbt.node_types import NodeType from dbt.utils import filter_null_values, deep_merge, classproperty @@ -27,8 +26,10 @@ class BaseRelation(FakeAPIObject, Hashable): path: Path type: Optional[RelationType] = None quote_character: str = '"' - include_policy: Policy = Policy() - quote_policy: Policy = Policy() + # Python 3.11 requires that these use default_factory instead of simple default + # ValueError: mutable default for field include_policy is not allowed: use default_factory + include_policy: Policy = field(default_factory=lambda: Policy()) + quote_policy: Policy = field(default_factory=lambda: Policy()) dbt_created: bool = False def _is_exactish_match(self, field: ComponentName, value: str) -> bool: @@ -39,9 +40,9 @@ def _is_exactish_match(self, field: ComponentName, value: str) -> bool: @classmethod def _get_field_named(cls, field_name): - for field, _ in cls._get_fields(): - if field.name == field_name: - return field + for f, _ in cls._get_fields(): + if f.name == field_name: + return f # this should be unreachable raise ValueError(f"BaseRelation has no {field_name} field!") @@ -52,11 +53,11 @@ def __eq__(self, other): @classmethod def get_default_quote_policy(cls) -> Policy: - return cls._get_field_named("quote_policy").default + return 
cls._get_field_named("quote_policy").default_factory() @classmethod def get_default_include_policy(cls) -> Policy: - return cls._get_field_named("include_policy").default + return cls._get_field_named("include_policy").default_factory() def get(self, key, default=None): """Override `.get` to return a metadata object so we don't break @@ -99,7 +100,7 @@ def matches( if approximate_match and not exact_match: target = self.create(database=database, schema=schema, identifier=identifier) - dbt.exceptions.approximate_relation_match(target, self) + raise ApproximateMatch(target, self) return exact_match @@ -184,7 +185,7 @@ def quoted(self, identifier): ) @classmethod - def create_from_source(cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any) -> Self: + def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self: source_quoting = source.quoting.to_dict(omit_none=True) source_quoting.pop("column", None) quote_policy = deep_merge( @@ -209,7 +210,7 @@ def add_ephemeral_prefix(name: str): def create_ephemeral_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ManifestNode, ) -> Self: # Note that ephemeral models are based on the name. identifier = cls.add_ephemeral_prefix(node.name) @@ -222,7 +223,7 @@ def create_ephemeral_from_node( def create_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ManifestNode, quote_policy: Optional[Dict[str, bool]] = None, **kwargs: Any, ) -> Self: @@ -243,20 +244,20 @@ def create_from_node( def create_from( cls: Type[Self], config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], + node: ResultNode, **kwargs: Any, ) -> Self: if node.resource_type == NodeType.Source: - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): raise InternalException( - "type mismatch, expected ParsedSourceDefinition but got {}".format(type(node)) + "type mismatch, expected SourceDefinition but got {}".format(type(node)) ) return cls.create_from_source(node, **kwargs) else: - if not isinstance(node, (ParsedNode, CompiledNode)): + # Can't use ManifestNode here because of parameterized generics + if not isinstance(node, (ParsedNode)): raise InternalException( - "type mismatch, expected ParsedNode or CompiledNode but " - "got {}".format(type(node)) + f"type mismatch, expected ManifestNode but got {type(node)}" ) return cls.create_from_node(config, node, **kwargs) @@ -437,7 +438,7 @@ def flatten(self, allow_multiple_databases: bool = False): if not allow_multiple_databases: seen = {r.database.lower() for r in self if r.database} if len(seen) > 1: - dbt.exceptions.raise_compiler_error(str(seen)) + raise MultipleDatabasesNotAllowed(seen) for information_schema_name, schema in self.search(): path = {"database": information_schema_name.database, "schema": schema} diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py index 6c60039f262..90c4cab27fb 100644 --- a/core/dbt/adapters/cache.py +++ b/core/dbt/adapters/cache.py @@ -1,4 +1,3 @@ -import re import threading from copy import deepcopy from typing import Any, Dict, Iterable, List, Optional, Set, Tuple @@ -9,7 +8,13 @@ _make_msg_from_ref_key, _ReferenceKey, ) -import dbt.exceptions +from dbt.exceptions import ( + DependentLinkNotCached, + NewNameAlreadyInCache, + NoneRelationFound, + ReferencedLinkNotCached, + TruncatedModelNameCausedCollision, +) from dbt.events.functions import fire_event, fire_event_if from dbt.events.types import ( 
AddLink, @@ -150,11 +155,7 @@ def rename_key(self, old_key, new_key): :raises InternalError: If the new key already exists. """ if new_key in self.referenced_by: - dbt.exceptions.raise_cache_inconsistent( - 'in rename of "{}" -> "{}", new name is in the cache already'.format( - old_key, new_key - ) - ) + raise NewNameAlreadyInCache(old_key, new_key) if old_key not in self.referenced_by: return @@ -270,15 +271,11 @@ def _add_link(self, referenced_key, dependent_key): if referenced is None: return if referenced is None: - dbt.exceptions.raise_cache_inconsistent( - "in add_link, referenced link key {} not in cache!".format(referenced_key) - ) + raise ReferencedLinkNotCached(referenced_key) dependent = self.relations.get(dependent_key) if dependent is None: - dbt.exceptions.raise_cache_inconsistent( - "in add_link, dependent link key {} not in cache!".format(dependent_key) - ) + raise DependentLinkNotCached(dependent_key) assert dependent is not None # we just raised! @@ -430,24 +427,7 @@ def _check_rename_constraints(self, old_key, new_key): if new_key in self.relations: # Tell user when collision caused by model names truncated during # materialization. - match = re.search("__dbt_backup|__dbt_tmp$", new_key.identifier) - if match: - truncated_model_name_prefix = new_key.identifier[: match.start()] - message_addendum = ( - "\n\nName collisions can occur when the length of two " - "models' names approach your database's builtin limit. " - "Try restructuring your project such that no two models " - "share the prefix '{}'.".format(truncated_model_name_prefix) - + " Then, clean your warehouse of any removed models." - ) - else: - message_addendum = "" - - dbt.exceptions.raise_cache_inconsistent( - "in rename, new key {} already in cache: {}{}".format( - new_key, list(self.relations.keys()), message_addendum - ) - ) + raise TruncatedModelNameCausedCollision(new_key, self.relations) if old_key not in self.relations: fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key))) @@ -505,9 +485,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[ ] if None in results: - dbt.exceptions.raise_cache_inconsistent( - "in get_relations, a None relation was found in the cache!" - ) + raise NoneRelationFound() return results def clear(self): diff --git a/core/dbt/adapters/protocol.py b/core/dbt/adapters/protocol.py index f17c2bd6f45..13b9bd79968 100644 --- a/core/dbt/adapters/protocol.py +++ b/core/dbt/adapters/protocol.py @@ -8,7 +8,6 @@ Generic, TypeVar, Tuple, - Union, Dict, Any, ) @@ -17,8 +16,7 @@ import agate from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse -from dbt.contracts.graph.compiled import CompiledNode, ManifestNode, NonSourceCompiledNode -from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition +from dbt.contracts.graph.nodes import ResultNode, ManifestNode from dbt.contracts.graph.model_config import BaseConfig from dbt.contracts.graph.manifest import Manifest from dbt.contracts.relation import Policy, HasQuoting @@ -48,11 +46,7 @@ def get_default_quote_policy(cls) -> Policy: ... @classmethod - def create_from( - cls: Type[Self], - config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], - ) -> Self: + def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self: ... @@ -65,7 +59,7 @@ def compile_node( node: ManifestNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestNode: ... 
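The `default_factory` change to `BaseRelation` a few hunks earlier exists because Python 3.11 rejects unhashable instances as dataclass field defaults, exactly as the added comment quotes. A standalone sketch of the failure mode and the fix, using simplified stand-in classes rather than the real dbt types:

```python
from dataclasses import dataclass, field


@dataclass
class Policy:  # stand-in for dbt's Policy; eq=True makes instances unhashable
    database: bool = True
    schema: bool = True
    identifier: bool = True


@dataclass(frozen=True)
class BaseRelation:  # stand-in for dbt's BaseRelation
    # Writing `include_policy: Policy = Policy()` raises on Python 3.11:
    #   ValueError: mutable default <class 'Policy'> for field
    #   include_policy is not allowed: use default_factory
    include_policy: Policy = field(default_factory=Policy)
    quote_policy: Policy = field(default_factory=Policy)


# Each instance now gets its own fresh Policy objects instead of sharing one.
print(BaseRelation().include_policy)
```

This is also why the accompanying hunk changes `get_default_quote_policy` and `get_default_include_policy` to call the field's `default_factory()` rather than reading its `.default`.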
diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py index f8928a37651..bc1a562ad86 100644 --- a/core/dbt/adapters/sql/connections.py +++ b/core/dbt/adapters/sql/connections.py @@ -10,6 +10,7 @@ from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse from dbt.events.functions import fire_event from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus +from dbt.events.contextvars import get_node_info from dbt.utils import cast_to_str @@ -56,7 +57,13 @@ def add_query( connection = self.get_thread_connection() if auto_begin and connection.transaction_open is False: self.begin() - fire_event(ConnectionUsed(conn_type=self.TYPE, conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionUsed( + conn_type=self.TYPE, + conn_name=cast_to_str(connection.name), + node_info=get_node_info(), + ) + ) with self.exception_handler(sql): if abridge_sql_log: @@ -64,7 +71,11 @@ def add_query( else: log_sql = sql - fire_event(SQLQuery(conn_name=cast_to_str(connection.name), sql=log_sql)) + fire_event( + SQLQuery( + conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info() + ) + ) pre = time.time() cursor = connection.handle.cursor() @@ -72,7 +83,9 @@ def add_query( fire_event( SQLQueryStatus( - status=str(self.get_response(cursor)), elapsed=round((time.time() - pre), 2) + status=str(self.get_response(cursor)), + elapsed=round((time.time() - pre)), + node_info=get_node_info(), ) ) @@ -156,7 +169,7 @@ def commit(self): "it does not have one open!".format(connection.name) ) - fire_event(SQLCommit(conn_name=connection.name)) + fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info())) self.add_commit_query() connection.transaction_open = False diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py index 20241d9e53d..4606b046f54 100644 --- a/core/dbt/adapters/sql/impl.py +++ b/core/dbt/adapters/sql/impl.py @@ -1,9 +1,8 @@ import agate from typing import Any, Optional, Tuple, Type, List -import dbt.clients.agate_helper from dbt.contracts.connection import Connection -import dbt.exceptions +from dbt.exceptions import RelationTypeNull from dbt.adapters.base import BaseAdapter, available from dbt.adapters.cache import _make_ref_key_msg from dbt.adapters.sql import SQLConnectionManager @@ -132,9 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None: def drop_relation(self, relation): if relation.type is None: - dbt.exceptions.raise_compiler_error( - "Tried to drop relation {}, but its type is null.".format(relation) - ) + raise RelationTypeNull(relation) self.cache_dropped(relation) self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation}) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index ce160fb8011..5292f795665 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -62,7 +62,6 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]: @p.cache_selected_only @p.debug @p.enable_legacy_logger -@p.event_buffer_size @p.fail_fast @p.log_cache_events @p.log_format diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index a4119426895..7795fb9d218 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -80,14 +80,6 @@ hidden=True, ) -event_buffer_size = click.option( - "--event-buffer-size", - envvar="DBT_EVENT_BUFFER_SIZE", - help="Sets the max number of events to buffer in EVENT_HISTORY.", - default=100000, - type=click.INT, -) - exclude = click.option("--exclude", 
envvar=None, help="Specify the nodes to exclude.") fail_fast = click.option( diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index c1ef31acf44..fa74a317649 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -1,7 +1,15 @@ import re from collections import namedtuple -import dbt.exceptions +from dbt.exceptions import ( + BlockDefinitionNotAtTop, + InternalException, + MissingCloseTag, + MissingControlFlowStartTag, + NestedTags, + UnexpectedControlFlowEndTag, + UnexpectedMacroEOF, +) def regex(pat): @@ -139,10 +147,7 @@ def _first_match(self, *patterns, **kwargs): def _expect_match(self, expected_name, *patterns, **kwargs): match = self._first_match(*patterns, **kwargs) if match is None: - msg = 'unexpected EOF, expected {}, got "{}"'.format( - expected_name, self.data[self.pos :] - ) - dbt.exceptions.raise_compiler_error(msg) + raise UnexpectedMacroEOF(expected_name, self.data[self.pos :]) return match def handle_expr(self, match): @@ -256,7 +261,7 @@ def find_tags(self): elif block_type_name is not None: yield self.handle_tag(match) else: - raise dbt.exceptions.InternalException( + raise InternalException( "Invalid regex match in next_block, expected block start, " "expr start, or comment start" ) @@ -265,13 +270,6 @@ def __iter__(self): return self.find_tags() -duplicate_tags = ( - "Got nested tags: {outer.block_type_name} (started at {outer.start}) did " - "not have a matching {{% end{outer.block_type_name} %}} before a " - "subsequent {inner.block_type_name} was found (started at {inner.start})" -) - - _CONTROL_FLOW_TAGS = { "if": "endif", "for": "endfor", @@ -319,33 +317,16 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): found = self.stack.pop() else: expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name] - dbt.exceptions.raise_compiler_error( - ( - "Got an unexpected control flow end tag, got {} but " - "never saw a preceeding {} (@ {})" - ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start)) - ) + raise UnexpectedControlFlowEndTag(tag, expected, self.tag_parser) expected = _CONTROL_FLOW_TAGS[found] if expected != tag.block_type_name: - dbt.exceptions.raise_compiler_error( - ( - "Got an unexpected control flow end tag, got {} but " - "expected {} next (@ {})" - ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start)) - ) + raise MissingControlFlowStartTag(tag, expected, self.tag_parser) if tag.block_type_name in allowed_blocks: if self.stack: - dbt.exceptions.raise_compiler_error( - ( - "Got a block definition inside control flow at {}. 
" - "All dbt block definitions must be at the top level" - ).format(self.tag_parser.linepos(tag.start)) - ) + raise BlockDefinitionNotAtTop(self.tag_parser, tag.start) if self.current is not None: - dbt.exceptions.raise_compiler_error( - duplicate_tags.format(outer=self.current, inner=tag) - ) + raise NestedTags(outer=self.current, inner=tag) if collect_raw_data: raw_data = self.data[self.last_position : tag.start] self.last_position = tag.start @@ -366,11 +347,7 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 - dbt.exceptions.raise_compiler_error( - ("Reached EOF without finding a close tag for {} (searched from line {})").format( - self.current.block_type_name, linecount - ) - ) + raise MissingCloseTag(self.current.block_type_name, linecount) if collect_raw_data: raw_data = self.data[self.last_position :] diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py index 9eaa93203e0..4ddbb1969ee 100644 --- a/core/dbt/clients/git.py +++ b/core/dbt/clients/git.py @@ -14,10 +14,10 @@ ) from dbt.exceptions import ( CommandResultError, + GitCheckoutError, + GitCloningError, + GitCloningProblem, RuntimeException, - bad_package_spec, - raise_git_cloning_error, - raise_git_cloning_problem, ) from packaging import version @@ -27,16 +27,6 @@ def _is_commit(revision: str) -> bool: return bool(re.match(r"\b[0-9a-f]{40}\b", revision)) -def _raise_git_cloning_error(repo, revision, error): - stderr = error.stderr.strip() - if "usage: git" in stderr: - stderr = stderr.split("\nusage: git")[0] - if re.match("fatal: destination path '(.+)' already exists", stderr): - raise_git_cloning_error(error) - - bad_package_spec(repo, revision, stderr) - - def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None): has_revision = revision is not None is_commit = _is_commit(revision or "") @@ -64,7 +54,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec try: result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"}) except CommandResultError as exc: - _raise_git_cloning_error(repo, revision, exc) + raise GitCloningError(repo, revision, exc) if subdirectory: cwd_subdir = os.path.join(cwd, dirname or "") @@ -72,7 +62,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec try: run_cmd(cwd_subdir, clone_cmd_subdir) except CommandResultError as exc: - _raise_git_cloning_error(repo, revision, exc) + raise GitCloningError(repo, revision, exc) if remove_git_dir: rmdir(os.path.join(dirname, ".git")) @@ -115,8 +105,7 @@ def checkout(cwd, repo, revision=None): try: return _checkout(cwd, repo, revision) except CommandResultError as exc: - stderr = exc.stderr.strip() - bad_package_spec(repo, revision, stderr) + raise GitCheckoutError(repo=repo, revision=revision, error=exc) def get_current_sha(cwd): @@ -145,7 +134,7 @@ def clone_and_checkout( err = exc.stderr exists = re.match("fatal: destination path '(.+)' already exists", err) if not exists: - raise_git_cloning_problem(repo) + raise GitCloningProblem(repo) directory = None start_sha = None diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py index 5e9835952a8..c1b8865e33e 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -25,16 +25,19 @@ ) from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag -from dbt.contracts.graph.compiled import CompiledGenericTestNode -from dbt.contracts.graph.parsed import ParsedGenericTestNode +from 
dbt.contracts.graph.nodes import GenericTestNode from dbt.exceptions import ( - InternalException, - raise_compiler_error, + CaughtMacroException, + CaughtMacroExceptionWithNode, CompilationException, - invalid_materialization_argument, - MacroReturn, + InternalException, + InvalidMaterializationArg, JinjaRenderingException, + MacroReturn, + MaterializtionMacroNotUsed, + NoSupportedLanguagesFound, + UndefinedCompilation, UndefinedMacroException, ) from dbt import flags @@ -238,7 +241,7 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise_compiler_error(str(e)) + raise CaughtMacroException(e) def call_macro(self, *args, **kwargs): # called from __call__ methods @@ -297,7 +300,7 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise_compiler_error(str(e), self.macro) + raise CaughtMacroExceptionWithNode(exc=e, node=self.macro) except CompilationException as e: e.stack.append(self.macro) raise e @@ -377,7 +380,7 @@ def parse(self, parser): node.defaults.append(languages) else: - invalid_materialization_argument(materialization_name, target.name) + raise InvalidMaterializationArg(materialization_name, target.name) if SUPPORTED_LANG_ARG not in node.args: node.args.append(SUPPORTED_LANG_ARG) @@ -452,7 +455,7 @@ def __call__(self, *args, **kwargs): return self def __reduce__(self): - raise_compiler_error(f"{self.name} is undefined", node=node) + raise UndefinedCompilation(name=self.name, node=node) return Undefined @@ -620,7 +623,7 @@ def extract_toplevel_blocks( def add_rendered_test_kwargs( context: Dict[str, Any], - node: Union[ParsedGenericTestNode, CompiledGenericTestNode], + node: GenericTestNode, capture_macros: bool = False, ) -> None: """Render each of the test kwargs in the given context using the native @@ -652,13 +655,13 @@ def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any: def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]: if "materialization" not in node.name: - raise_compiler_error("Only materialization macros can be used with this function") + raise MaterializtionMacroNotUsed(node=node) no_kwargs = not node.defaults no_langs_found = SUPPORTED_LANG_ARG not in node.args if no_kwargs or no_langs_found: - raise_compiler_error(f"No supported_languages found in materialization macro {node.name}") + raise NoSupportedLanguagesFound(node=node) lang_idx = node.args.index(SUPPORTED_LANG_ARG) # indexing defaults from the end diff --git a/core/dbt/clients/jinja_static.py b/core/dbt/clients/jinja_static.py index 337a25eadda..d71211cea6e 100644 --- a/core/dbt/clients/jinja_static.py +++ b/core/dbt/clients/jinja_static.py @@ -1,6 +1,6 @@ import jinja2 from dbt.clients.jinja import get_environment -from dbt.exceptions import raise_compiler_error +from dbt.exceptions import MacroNamespaceNotString, MacroNameNotString def statically_extract_macro_calls(string, ctx, db_wrapper=None): @@ -117,20 +117,14 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper): func_name = kwarg.value.value possible_macro_calls.append(func_name) else: - raise_compiler_error( - f"The macro_name parameter ({kwarg.value.value}) " - "to adapter.dispatch was not a string" - ) + raise MacroNameNotString(kwarg_value=kwarg.value.value) elif kwarg.key == "macro_namespace": # This will remain to enable static resolution kwarg_type = type(kwarg.value).__name__ if kwarg_type == "Const": macro_namespace = 
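The `exception_handler` methods in `jinja.py` keep their shape; only the body changes from `raise_compiler_error(str(e))` to raising a typed exception. The underlying context-manager pattern, sketched with an illustrative exception class:

from contextlib import contextmanager
from typing import Iterator


class CaughtMacroError(Exception):
    """Illustrative stand-in for dbt's CaughtMacroException."""

    def __init__(self, exc: Exception) -> None:
        super().__init__(f"Caught an exception while rendering a macro: {exc}")


@contextmanager
def exception_handler() -> Iterator[None]:
    # Translate low-level rendering failures into one domain exception.
    try:
        yield
    except TypeError as e:
        raise CaughtMacroError(e)


try:
    with exception_handler():
        len(None)  # type: ignore[arg-type]  # TypeError -> CaughtMacroError
except CaughtMacroError as exc:
    print(exc)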
kwarg.value.value else: - raise_compiler_error( - "The macro_namespace parameter to adapter.dispatch " - f"is a {kwarg_type}, not a string" - ) + raise MacroNamespaceNotString(kwarg_type) # positional arguments if packages_arg: diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index d1b1c461f50..e5a02b68475 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -157,7 +157,8 @@ def make_symlink(source: str, link_path: str) -> None: Create a symlink at `link_path` referring to `source`. """ if not supports_symlinks(): - dbt.exceptions.system_error("create a symbolic link") + # TODO: why not import these at top? + raise dbt.exceptions.SymbolicLinkError() os.symlink(source, link_path) diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 7163b669001..4ae78fd3485 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -1,6 +1,6 @@ import os from collections import defaultdict -from typing import List, Dict, Any, Tuple, cast, Optional +from typing import List, Dict, Any, Tuple, Optional import networkx as nx # type: ignore import pickle @@ -12,23 +12,23 @@ from dbt.clients.system import make_directory from dbt.context.providers import generate_runtime_model_context from dbt.contracts.graph.manifest import Manifest, UniqueID -from dbt.contracts.graph.compiled import ( - COMPILED_TYPES, - CompiledGenericTestNode, +from dbt.contracts.graph.nodes import ( + ManifestNode, + ManifestSQLNode, + GenericTestNode, GraphMemberNode, InjectedCTE, - ManifestNode, - NonSourceCompiledNode, + SeedNode, ) -from dbt.contracts.graph.parsed import ParsedNode from dbt.exceptions import ( - dependency_not_found, + GraphDependencyNotFound, InternalException, RuntimeException, ) from dbt.graph import Graph from dbt.events.functions import fire_event -from dbt.events.types import FoundStats, CompilingNode, WritingInjectedSQLForNode +from dbt.events.types import FoundStats, WritingInjectedSQLForNode +from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType, ModelLanguage from dbt.events.format import pluralize import dbt.tracking @@ -36,14 +36,6 @@ graph_file_name = "graph.gpickle" -def _compiled_type_for(model: ParsedNode): - if type(model) not in COMPILED_TYPES: - raise InternalException( - f"Asked to compile {type(model)} node, but it has no compiled form" - ) - return COMPILED_TYPES[type(model)] - - def print_compile_stats(stats): names = { NodeType.Model: "model", @@ -176,7 +168,7 @@ def initialize(self): # a dict for jinja rendering of SQL def _create_node_context( self, - node: NonSourceCompiledNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Dict[str, Any], ) -> Dict[str, Any]: @@ -184,7 +176,7 @@ def _create_node_context( context = generate_runtime_model_context(node, self.config, manifest) context.update(extra_context) - if isinstance(node, CompiledGenericTestNode): + if isinstance(node, GenericTestNode): # for test nodes, add a special keyword args value to the context jinja.add_rendered_test_kwargs(context, node) @@ -195,14 +187,6 @@ def add_ephemeral_prefix(self, name: str): relation_cls = adapter.Relation return relation_cls.add_ephemeral_prefix(name) - def _get_relation_name(self, node: ParsedNode): - relation_name = None - if node.is_relational and not node.is_ephemeral_model: - adapter = get_adapter(self.config) - relation_cls = adapter.Relation - relation_name = str(relation_cls.create_from(self.config, node)) - return relation_name - def _inject_ctes_into_sql(self, sql: str, ctes: 
List[InjectedCTE]) -> str: """ `ctes` is a list of InjectedCTEs like: @@ -261,10 +245,10 @@ def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str: def _recursively_prepend_ctes( self, - model: NonSourceCompiledNode, + model: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]], - ) -> Tuple[NonSourceCompiledNode, List[InjectedCTE]]: + ) -> Tuple[ManifestSQLNode, List[InjectedCTE]]: """This method is called by the 'compile_node' method. Starting from the node that it is passed in, it will recursively call itself using the 'extra_ctes'. The 'ephemeral' models do @@ -279,7 +263,8 @@ def _recursively_prepend_ctes( # Just to make it plain that nothing is actually injected for this case if not model.extra_ctes: - model.extra_ctes_injected = True + if not isinstance(model, SeedNode): + model.extra_ctes_injected = True manifest.update_node(model) return (model, model.extra_ctes) @@ -298,6 +283,7 @@ def _recursively_prepend_ctes( f"could not be resolved: {cte.id}" ) cte_model = manifest.nodes[cte.id] + assert not isinstance(cte_model, SeedNode) if not cte_model.is_ephemeral_model: raise InternalException(f"{cte.id} is not ephemeral") @@ -305,8 +291,6 @@ def _recursively_prepend_ctes( # This model has already been compiled, so it's been # through here before if getattr(cte_model, "compiled", False): - assert isinstance(cte_model, tuple(COMPILED_TYPES.values())) - cte_model = cast(NonSourceCompiledNode, cte_model) new_prepended_ctes = cte_model.extra_ctes # if the cte_model isn't compiled, i.e. first time here @@ -343,21 +327,19 @@ def _recursively_prepend_ctes( return model, prepended_ctes - # creates a compiled_node from the ManifestNode passed in, + # Sets compiled fields in the ManifestSQLNode passed in, # creates a "context" dictionary for jinja rendering, # and then renders the "compiled_code" using the node, the # raw_code and the context. def _compile_node( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestSQLNode: if extra_context is None: extra_context = {} - fire_event(CompilingNode(unique_id=node.unique_id)) - data = node.to_dict(omit_none=True) data.update( { @@ -367,9 +349,8 @@ def _compile_node( "extra_ctes": [], } ) - compiled_node = _compiled_type_for(node).from_dict(data) - if compiled_node.language == ModelLanguage.python: + if node.language == ModelLanguage.python: # TODO could we also 'minify' this code at all? 
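A recurring theme in the `compilation.py` hunks: `_compiled_type_for` and the `Parsed*`/`Compiled*` pairs are gone, and `_compile_node` now fills compiled fields on the one `ManifestSQLNode` it was given. A minimal sketch of that single-class design (field names simplified; the real node classes carry far more state):

from dataclasses import dataclass
from typing import Optional


@dataclass
class SQLNode:
    # Illustrative single node class in the spirit of ManifestSQLNode:
    # parsed fields plus compiled fields that start out empty.
    raw_code: str
    compiled: bool = False
    compiled_code: Optional[str] = None


def compile_node(node: SQLNode) -> SQLNode:
    # Render in place; no separate Compiled* instance is constructed.
    node.compiled_code = node.raw_code  # the real code jinja-renders here
    node.compiled = True
    return node


node = compile_node(SQLNode(raw_code="select 1 as id"))
print(node.compiled, node.compiled_code)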
just aesthetic, not functional # quoating seems like something very specific to sql so far @@ -377,7 +358,7 @@ def _compile_node( # TODO try to find better way to do this, given that original_quoting = self.config.quoting self.config.quoting = {key: False for key in original_quoting.keys()} - context = self._create_node_context(compiled_node, manifest, extra_context) + context = self._create_node_context(node, manifest, extra_context) postfix = jinja.get_rendered( "{{ py_script_postfix(model) }}", @@ -385,23 +366,21 @@ def _compile_node( node, ) # we should NOT jinja render the python model's 'raw code' - compiled_node.compiled_code = f"{node.raw_code}\n\n{postfix}" + node.compiled_code = f"{node.raw_code}\n\n{postfix}" # restore quoting settings in the end since context is lazy evaluated self.config.quoting = original_quoting else: - context = self._create_node_context(compiled_node, manifest, extra_context) - compiled_node.compiled_code = jinja.get_rendered( + context = self._create_node_context(node, manifest, extra_context) + node.compiled_code = jinja.get_rendered( node.raw_code, context, node, ) - compiled_node.relation_name = self._get_relation_name(node) + node.compiled = True - compiled_node.compiled = True - - return compiled_node + return node def write_graph_file(self, linker: Linker, manifest: Manifest): filename = graph_file_name @@ -420,7 +399,7 @@ def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest): elif dependency in manifest.metrics: linker.dependency(node.unique_id, (manifest.metrics[dependency].unique_id)) else: - dependency_not_found(node, dependency) + raise GraphDependencyNotFound(node, dependency) def link_graph(self, linker: Linker, manifest: Manifest, add_test_edges: bool = False): for source in manifest.sources.values(): @@ -508,10 +487,13 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph return Graph(linker.graph) # writes the "compiled_code" into the target/compiled directory - def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode: - if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot: + def _write_node(self, node: ManifestSQLNode) -> ManifestSQLNode: + if not node.extra_ctes_injected or node.resource_type in ( + NodeType.Snapshot, + NodeType.Seed, + ): return node - fire_event(WritingInjectedSQLForNode(unique_id=node.unique_id)) + fire_event(WritingInjectedSQLForNode(node_info=get_node_info())) if node.compiled_code: node.compiled_path = node.write_node( @@ -521,11 +503,11 @@ def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode: def compile_node( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, write: bool = True, - ) -> NonSourceCompiledNode: + ) -> ManifestSQLNode: """This is the main entry point into this code. It's called by CompileRunner.compile, GenericRPCRunner.compile, and RunTask.get_hook_sql. 
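The python-model branch above temporarily forces all quoting off, renders, then restores `self.config.quoting`. The same save/mutate/restore sequence can be expressed as a context manager so the restore is exception-safe; a sketch assuming a plain dict-shaped config rather than dbt's RuntimeConfig:

from contextlib import contextmanager


@contextmanager
def quoting_disabled(config):
    # Turn off all quoting for the duration of the block, restoring the
    # original settings afterwards even if rendering raises.
    original = dict(config["quoting"])
    config["quoting"] = {key: False for key in original}
    try:
        yield
    finally:
        config["quoting"] = original


config = {"quoting": {"database": True, "identifier": True}}
with quoting_disabled(config):
    assert not any(config["quoting"].values())
assert config["quoting"]["database"] is True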
It calls '_compile_node' to convert diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index 542062a2f6f..36eddfe33e0 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -9,12 +9,14 @@ from dbt.clients.yaml_helper import load_yaml_text from dbt.contracts.connection import Credentials, HasCredentials from dbt.contracts.project import ProfileConfig, UserConfig -from dbt.exceptions import CompilationException -from dbt.exceptions import DbtProfileError -from dbt.exceptions import DbtProjectError -from dbt.exceptions import ValidationException -from dbt.exceptions import RuntimeException -from dbt.exceptions import validator_error_message +from dbt.exceptions import ( + CompilationException, + DbtProfileError, + DbtProjectError, + ValidationException, + RuntimeException, + ProfileConfigInvalid, +) from dbt.events.types import MissingProfileTarget from dbt.events.functions import fire_event from dbt.utils import coerce_dict_str @@ -156,7 +158,7 @@ def validate(self): dct = self.to_profile_info(serialize_credentials=True) ProfileConfig.validate(dct) except ValidationError as exc: - raise DbtProfileError(validator_error_message(exc)) from exc + raise ProfileConfigInvalid(exc) from exc @staticmethod def _credentials_from_profile( diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 7a0eb4c8e9d..ebbe2684d22 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -16,19 +16,19 @@ import os from dbt import flags, deprecations -from dbt.clients.system import resolve_path_from_base -from dbt.clients.system import path_exists -from dbt.clients.system import load_file_contents +from dbt.clients.system import path_exists, resolve_path_from_base, load_file_contents from dbt.clients.yaml_helper import load_yaml_text from dbt.contracts.connection import QueryComment -from dbt.exceptions import DbtProjectError -from dbt.exceptions import SemverException -from dbt.exceptions import validator_error_message -from dbt.exceptions import RuntimeException +from dbt.exceptions import ( + DbtProjectError, + SemverException, + ProjectContractBroken, + ProjectContractInvalid, + RuntimeException, +) from dbt.graph import SelectionSpec from dbt.helper_types import NoValue -from dbt.semver import VersionSpecifier -from dbt.semver import versions_compatible +from dbt.semver import VersionSpecifier, versions_compatible from dbt.version import get_installed_version from dbt.utils import MultiDict from dbt.node_types import NodeType @@ -293,7 +293,7 @@ def render_package_metadata(self, renderer: PackageRenderer) -> ProjectPackageMe packages_data = renderer.render_data(self.packages_dict) packages_config = package_config_from_data(packages_data) if not self.project_name: - raise DbtProjectError(DbtProjectError("Package dbt_project.yml must have a name!")) + raise DbtProjectError("Package dbt_project.yml must have a name!") return ProjectPackageMetadata(self.project_name, packages_config.packages) def check_config_path(self, project_dict, deprecated_path, exp_path): @@ -332,7 +332,7 @@ def create_project(self, rendered: RenderComponents) -> "Project": ProjectContract.validate(rendered.project_dict) cfg = ProjectContract.from_dict(rendered.project_dict) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ProjectContractInvalid(e) from e # name/version are required in the Project definition, so we can assume # they are present name = cfg.name @@ -649,7 +649,7 @@ def validate(self): try: 
ProjectContract.validate(self.to_project_config()) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ProjectContractBroken(e) from e @classmethod def from_project_root( diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index ccf95c65f7c..46f03226b57 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -3,31 +3,41 @@ from copy import deepcopy from dataclasses import dataclass, field from pathlib import Path -from typing import Dict, Any, Optional, Mapping, Iterator, Iterable, Tuple, List, MutableSet, Type +from typing import ( + Any, + Dict, + Iterable, + Iterator, + Mapping, + MutableSet, + Optional, + Tuple, + Type, +) -from .profile import Profile -from .project import Project -from .renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt import flags -from dbt.adapters.factory import get_relation_class_by_name, get_include_paths -from dbt.helper_types import FQNPath, PathSet, DictDefaultEmptyStr +from dbt.adapters.factory import get_include_paths, get_relation_class_by_name from dbt.config.profile import read_user_config from dbt.config.project import load_raw_project from dbt.contracts.connection import AdapterRequiredConfig, Credentials, HasCredentials from dbt.contracts.graph.manifest import ManifestMetadata -from dbt.contracts.relation import ComponentName -from dbt.ui import warning_tag - from dbt.contracts.project import Configuration, UserConfig +from dbt.contracts.relation import ComponentName +from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - RuntimeException, + ConfigContractBroken, DbtProjectError, - validator_error_message, - warn_or_error, - raise_compiler_error, + NonUniquePackageName, + RuntimeException, + UninstalledPackagesFound, ) +from dbt.events.functions import warn_or_error +from dbt.events.types import UnusedResourceConfigPath +from dbt.helper_types import DictDefaultEmptyStr, FQNPath, PathSet -from dbt.dataclass_schema import ValidationError +from .profile import Profile +from .project import Project +from .renderer import DbtProjectYamlRenderer, ProfileRenderer def load_project( @@ -227,7 +237,7 @@ def validate(self): try: Configuration.validate(self.serialize()) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ConfigContractBroken(e) from e @classmethod def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]: @@ -240,7 +250,7 @@ def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profi args, ) project = load_project(project_root, bool(flags.VERSION_CHECK), profile, cli_vars) - return (project, profile) + return project, profile # Called in main.py, lib.py, task/base.py @classmethod @@ -309,11 +319,11 @@ def get_resource_config_paths(self) -> Dict[str, PathSet]: "exposures": self._get_config_paths(self.exposures), } - def get_unused_resource_config_paths( + def warn_for_unused_resource_config_paths( self, resource_fqns: Mapping[str, PathSet], disabled: PathSet, - ) -> List[FQNPath]: + ) -> None: """Return a list of lists of strings, where each inner list of strings represents a type + FQN path of a resource configuration that is not used. 
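`warn_for_unused_resource_config_paths` now folds the old two-step (collect, then format a message) into one pass that builds dotted `resource_type.path` strings and fires a single structured `UnusedResourceConfigPath` event. A simplified sketch of the collection step, with a plain prefix check standing in for `_is_config_used`:

from typing import Dict, List, Tuple


def unused_config_paths(
    config_paths: Dict[str, List[Tuple[str, ...]]],
    used_fqns: Dict[str, List[Tuple[str, ...]]],
) -> List[str]:
    # Build dotted "resource_type.path" strings for config paths that no
    # resource FQN actually uses.
    unused: List[str] = []
    for resource_type, paths in config_paths.items():
        fqns = used_fqns.get(resource_type, [])
        for path in paths:
            if not any(fqn[: len(path)] == path for fqn in fqns):
                unused.append(".".join((resource_type,) + path))
    return unused


print(unused_config_paths(
    {"models": [("my_project", "staging"), ("my_project", "marts")]},
    {"models": [("my_project", "staging", "stg_orders")]},
))  # -> ['models.my_project.marts']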
@@ -327,23 +337,13 @@ def get_unused_resource_config_paths( for config_path in config_paths: if not _is_config_used(config_path, fqns): - unused_resource_config_paths.append((resource_type,) + config_path) - return unused_resource_config_paths + resource_path = ".".join(i for i in ((resource_type,) + config_path)) + unused_resource_config_paths.append(resource_path) - def warn_for_unused_resource_config_paths( - self, - resource_fqns: Mapping[str, PathSet], - disabled: PathSet, - ) -> None: - unused = self.get_unused_resource_config_paths(resource_fqns, disabled) - if len(unused) == 0: + if len(unused_resource_config_paths) == 0: return - msg = UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE.format( - len(unused), "\n".join("- {}".format(".".join(u)) for u in unused) - ) - - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(UnusedResourceConfigPath(unused_config_paths=unused_resource_config_paths)) def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: if self.dependencies is None: @@ -357,22 +357,15 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: count_packages_specified = len(self.packages.packages) # type: ignore count_packages_installed = len(tuple(self._get_project_directories())) if count_packages_specified > count_packages_installed: - raise_compiler_error( - f"dbt found {count_packages_specified} package(s) " - f"specified in packages.yml, but only " - f"{count_packages_installed} package(s) installed " - f'in {self.packages_install_path}. Run "dbt deps" to ' - f"install package dependencies." + raise UninstalledPackagesFound( + count_packages_specified, + count_packages_installed, + self.packages_install_path, ) project_paths = itertools.chain(internal_packages, self._get_project_directories()) for project_name, project in self.load_projects(project_paths): if project_name in all_projects: - raise_compiler_error( - f"dbt found more than one package with the name " - f'"{project_name}" included in this project. Package ' - f"names must be unique in a project. Please rename " - f"one of these packages." - ) + raise NonUniquePackageName(project_name) all_projects[project_name] = project self.dependencies = all_projects return self.dependencies @@ -627,14 +620,6 @@ def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig": return cls.from_parts(project=project, profile=profile, args=args) -UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE = """\ -Configuration paths exist in your dbt_project.yml file which do not \ -apply to any resources. 
-There are {} unused configuration paths: -{} -""" - - def _is_config_used(path, fqns): if fqns: for fqn in fqns: diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index b3be5d5501b..76fd8f6b466 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -9,7 +9,7 @@ from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event from dbt.events.types import InvalidVarsYAML -from dbt.exceptions import ValidationException, raise_compiler_error +from dbt.exceptions import ValidationException, VarsArgNotYamlDict def parse_cli_vars(var: str) -> Dict[str, Any]: @@ -19,11 +19,7 @@ def parse_cli_vars(var: str) -> Dict[str, Any]: if var_type is dict: return cli_vars else: - type_name = var_type.__name__ - raise_compiler_error( - "The --vars argument must be a YAML dictionary, but was " - "of type '{}'".format(type_name) - ) + raise VarsArgNotYamlDict(var_type) except ValidationException: fire_event(InvalidVarsYAML()) raise diff --git a/core/dbt/constants.py b/core/dbt/constants.py index 1599df3e335..63213476e54 100644 --- a/core/dbt/constants.py +++ b/core/dbt/constants.py @@ -1,3 +1,10 @@ SECRET_ENV_PREFIX = "DBT_ENV_SECRET_" DEFAULT_ENV_PLACEHOLDER = "DBT_DEFAULT_PLACEHOLDER" METADATA_ENV_PREFIX = "DBT_ENV_CUSTOM_ENV_" + +MAXIMUM_SEED_SIZE = 1 * 1024 * 1024 +MAXIMUM_SEED_SIZE_NAME = "1MB" + +PIN_PACKAGE_URL = ( + "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" +) diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index 68b5edb98c1..59984cb96ab 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -4,19 +4,22 @@ from dbt import flags from dbt import tracking +from dbt import utils from dbt.clients.jinja import get_rendered from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper # noqa: F401 from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.contracts.graph.compiled import CompiledResource +from dbt.contracts.graph.nodes import Resource from dbt.exceptions import ( - CompilationException, + DisallowSecretEnvVar, + EnvVarMissing, MacroReturn, - raise_compiler_error, - raise_parsing_error, - disallow_secret_env_var, + RequiredVarNotFound, + SetStrictWrongType, + ZipStrictWrongType, ) from dbt.events.functions import fire_event, get_invocation_id -from dbt.events.types import MacroEventInfo, MacroEventDebug +from dbt.events.types import JinjaLogInfo, JinjaLogDebug +from dbt.events.contextvars import get_node_info from dbt.version import __version__ as dbt_version # These modules are added to the context. 
Consider alternative @@ -126,18 +129,17 @@ def __new__(mcls, name, bases, dct): class Var: - UndefinedVarError = "Required var '{}' not found in config:\nVars supplied to {} = {}" _VAR_NOTSET = object() def __init__( self, context: Mapping[str, Any], cli_vars: Mapping[str, Any], - node: Optional[CompiledResource] = None, + node: Optional[Resource] = None, ) -> None: self._context: Mapping[str, Any] = context self._cli_vars: Mapping[str, Any] = cli_vars - self._node: Optional[CompiledResource] = node + self._node: Optional[Resource] = node self._merged: Mapping[str, Any] = self._generate_merged() def _generate_merged(self) -> Mapping[str, Any]: @@ -151,10 +153,7 @@ def node_name(self): return "" def get_missing_var(self, var_name): - dct = {k: self._merged[k] for k in self._merged} - pretty_vars = json.dumps(dct, sort_keys=True, indent=4) - msg = self.UndefinedVarError.format(var_name, self.node_name, pretty_vars) - raise_compiler_error(msg, self._node) + raise RequiredVarNotFound(var_name, self._merged, self._node) def has_var(self, var_name: str): return var_name in self._merged @@ -298,7 +297,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -313,8 +312,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) if os.environ.get("DBT_MACRO_DEBUGGING"): @@ -495,7 +493,7 @@ def set_strict(value: Iterable[Any]) -> Set[Any]: try: return set(value) except TypeError as e: - raise CompilationException(e) + raise SetStrictWrongType(e) @contextmember("zip") @staticmethod @@ -539,7 +537,7 @@ def zip_strict(*args: Iterable[Any]) -> Iterable[Any]: try: return zip(*args) except TypeError as e: - raise CompilationException(e) + raise ZipStrictWrongType(e) @contextmember @staticmethod @@ -557,9 +555,9 @@ def log(msg: str, info: bool = False) -> str: {% endmacro %}" """ if info: - fire_event(MacroEventInfo(msg=msg)) + fire_event(JinjaLogInfo(msg=msg, node_info=get_node_info())) else: - fire_event(MacroEventDebug(msg=msg)) + fire_event(JinjaLogDebug(msg=msg, node_info=get_node_info())) return "" @contextproperty @@ -687,6 +685,19 @@ def diff_of_two_dicts( dict_diff.update({k: dict_a[k]}) return dict_diff + @contextmember + @staticmethod + def local_md5(value: str) -> str: + """Calculates an MD5 hash of the given string. + It's called "local_md5" to emphasize that it runs locally in dbt (in jinja context) and not an MD5 SQL command. 
+ + :param value: The value to hash + + Usage: + {% set value_hash = local_md5("hello world") %} + """ + return utils.md5(value) + def generate_base_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: ctx = BaseContext(cli_vars) diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py index 64fdcd935b3..7339bdb1152 100644 --- a/core/dbt/context/configured.py +++ b/core/dbt/context/configured.py @@ -8,7 +8,7 @@ from dbt.context.base import contextproperty, contextmember, Var from dbt.context.target import TargetContext -from dbt.exceptions import raise_parsing_error, disallow_secret_env_var +from dbt.exceptions import EnvVarMissing, DisallowSecretEnvVar class ConfiguredContext(TargetContext): @@ -87,7 +87,7 @@ def var(self) -> ConfiguredVar: def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -105,8 +105,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) class MacroResolvingContext(ConfiguredContext): diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py index 26096caa108..89a652736dd 100644 --- a/core/dbt/context/docs.py +++ b/core/dbt/context/docs.py @@ -1,13 +1,12 @@ from typing import Any, Dict, Union from dbt.exceptions import ( - doc_invalid_args, - doc_target_not_found, + DocTargetNotFound, + InvalidDocArgs, ) from dbt.config.runtime import RuntimeConfig -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro, ResultNode from dbt.context.base import contextmember from dbt.context.configured import SchemaYamlContext @@ -17,7 +16,7 @@ class DocsRuntimeContext(SchemaYamlContext): def __init__( self, config: RuntimeConfig, - node: Union[ParsedMacro, CompileResultNode], + node: Union[Macro, ResultNode], manifest: Manifest, current_project: str, ) -> None: @@ -53,9 +52,9 @@ def doc(self, *args: str) -> str: elif len(args) == 2: doc_package_name, doc_name = args else: - doc_invalid_args(self.node, args) + raise InvalidDocArgs(self.node, args) - # ParsedDocumentation + # Documentation target_doc = self.manifest.resolve_doc( doc_name, doc_package_name, @@ -69,7 +68,9 @@ def doc(self, *args: str) -> str: # TODO CT-211 source_file.add_node(self.node.unique_id) # type: ignore[union-attr] else: - doc_target_not_found(self.node, doc_name, doc_package_name) + raise DocTargetNotFound( + node=self.node, target_doc_name=doc_name, target_doc_package=doc_package_name + ) return target_doc.block_contents diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py new file mode 100644 index 00000000000..5663b4701e0 --- /dev/null +++ b/core/dbt/context/exceptions_jinja.py @@ -0,0 +1,142 @@ +import functools +from typing import NoReturn + +from dbt.events.functions import warn_or_error +from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.types import JinjaLogWarning + +from dbt.exceptions import ( + RuntimeException, + MissingConfig, + MissingMaterialization, + MissingRelation, + AmbiguousAlias, + AmbiguousCatalogMatch, + CacheInconsistency, + DataclassNotDict, + CompilationException, + DatabaseException, + 
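The new `local_md5` context member delegates to `dbt.utils.md5`. Presumably that helper is a thin hashlib wrapper along these lines (the utf-8 encoding and hexdigest output are assumptions here, not confirmed by the diff):

import hashlib


def local_md5(value: str) -> str:
    # Hash locally in Python instead of emitting an MD5() call
    # to the warehouse.
    return hashlib.md5(value.encode("utf-8")).hexdigest()


assert local_md5("hello world") == "5eb63bbbe01eeed093cb22bb8f5acdc3"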
DependencyNotFound, + DependencyException, + DuplicatePatchPath, + DuplicateResourceName, + InvalidPropertyYML, + NotImplementedException, + RelationWrongType, +) + + +def warn(msg, node=None): + warn_or_error(JinjaLogWarning(msg=msg), node=node) + return "" + + +def missing_config(model, name) -> NoReturn: + raise MissingConfig(unique_id=model.unique_id, name=name) + + +def missing_materialization(model, adapter_type) -> NoReturn: + raise MissingMaterialization(model=model, adapter_type=adapter_type) + + +def missing_relation(relation, model=None) -> NoReturn: + raise MissingRelation(relation, model) + + +def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: + raise AmbiguousAlias(node_1, node_2, duped_name) + + +def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: + raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + + +def raise_cache_inconsistent(message) -> NoReturn: + raise CacheInconsistency(message) + + +def raise_dataclass_not_dict(obj) -> NoReturn: + raise DataclassNotDict(obj) + + +def raise_compiler_error(msg, node=None) -> NoReturn: + raise CompilationException(msg, node) + + +def raise_database_error(msg, node=None) -> NoReturn: + raise DatabaseException(msg, node) + + +def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: + raise DependencyNotFound(node, node_description, required_pkg) + + +def raise_dependency_error(msg) -> NoReturn: + raise DependencyException(scrub_secrets(msg, env_secrets())) + + +def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: + raise DuplicatePatchPath(patch_1, existing_patch_path) + + +def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: + raise DuplicateResourceName(node_1, node_2) + + +def raise_invalid_property_yml_version(path, issue) -> NoReturn: + raise InvalidPropertyYML(path, issue) + + +def raise_not_implemented(msg) -> NoReturn: + raise NotImplementedException(msg) + + +def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: + raise RelationWrongType(relation, expected_type, model) + + +# Update this when a new function should be added to the +# dbt context's `exceptions` key! 
+CONTEXT_EXPORTS = { + fn.__name__: fn + for fn in [ + warn, + missing_config, + missing_materialization, + missing_relation, + raise_ambiguous_alias, + raise_ambiguous_catalog_match, + raise_cache_inconsistent, + raise_dataclass_not_dict, + raise_compiler_error, + raise_database_error, + raise_dep_not_found, + raise_dependency_error, + raise_duplicate_patch_name, + raise_duplicate_resource_name, + raise_invalid_property_yml_version, + raise_not_implemented, + relation_wrong_type, + ] +} + + +# wraps context based exceptions in node info +def wrapper(model): + def wrap(func): + @functools.wraps(func) + def inner(*args, **kwargs): + try: + return func(*args, **kwargs) + except RuntimeException as exc: + exc.add_node(model) + raise exc + + return inner + + return wrap + + +def wrapped_exports(model): + wrap = wrapper(model) + return {name: wrap(export) for name, export in CONTEXT_EXPORTS.items()} diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py index 2766dc4130c..6e70bafd05e 100644 --- a/core/dbt/context/macro_resolver.py +++ b/core/dbt/context/macro_resolver.py @@ -1,10 +1,10 @@ from typing import Dict, MutableMapping, Optional -from dbt.contracts.graph.parsed import ParsedMacro -from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error +from dbt.contracts.graph.nodes import Macro +from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.clients.jinja import MacroGenerator -MacroNamespace = Dict[str, ParsedMacro] +MacroNamespace = Dict[str, Macro] # This class builds the MacroResolver by adding macros @@ -21,7 +21,7 @@ class MacroResolver: def __init__( self, - macros: MutableMapping[str, ParsedMacro], + macros: MutableMapping[str, Macro], root_project_name: str, internal_package_names, ) -> None: @@ -77,7 +77,7 @@ def _build_macros_by_name(self): def _add_macro_to( self, package_namespaces: Dict[str, MacroNamespace], - macro: ParsedMacro, + macro: Macro, ): if macro.package_name in package_namespaces: namespace = package_namespaces[macro.package_name] @@ -86,10 +86,10 @@ def _add_macro_to( package_namespaces[macro.package_name] = namespace if macro.name in namespace: - raise_duplicate_macro_name(macro, macro, macro.package_name) + raise DuplicateMacroName(macro, macro, macro.package_name) package_namespaces[macro.package_name][macro.name] = macro - def add_macro(self, macro: ParsedMacro): + def add_macro(self, macro: Macro): macro_name: str = macro.name # internal macros (from plugins) will be processed separately from @@ -187,7 +187,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.macro_resolver.packages: macro = self.macro_resolver.packages[package_name].get(name) else: - raise_compiler_error(f"Could not find package '{package_name}'") + raise PackageNotFoundForMacro(package_name) if not macro: return None macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx) diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py index dccd376b876..921480ec05a 100644 --- a/core/dbt/context/macros.py +++ b/core/dbt/context/macros.py @@ -1,9 +1,9 @@ from typing import Any, Dict, Iterable, Union, Optional, List, Iterator, Mapping, Set from dbt.clients.jinja import MacroGenerator, MacroStack -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME 
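`wrapped_exports` above decorates every function in `CONTEXT_EXPORTS` so that a `RuntimeException` escaping a jinja context gets tagged with the calling model before propagating. The decorator pattern in isolation, with an illustrative exception class:

import functools


class RuntimeErrorWithNodes(Exception):
    """Illustrative stand-in for dbt's RuntimeException."""

    def __init__(self, msg: str) -> None:
        super().__init__(msg)
        self.nodes: list = []

    def add_node(self, node) -> None:
        self.nodes.append(node)


def wrapper(model):
    def wrap(func):
        @functools.wraps(func)  # preserve __name__ for the exports dict
        def inner(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except RuntimeErrorWithNodes as exc:
                exc.add_node(model)  # attach context, then re-raise as-is
                raise
        return inner
    return wrap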
-from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error +from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro FlatNamespace = Dict[str, MacroGenerator] @@ -75,7 +75,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.packages: return self.packages[package_name].get(name) else: - raise_compiler_error(f"Could not find package '{package_name}'") + raise PackageNotFoundForMacro(package_name) # This class builds the MacroNamespace by adding macros to @@ -112,7 +112,7 @@ def __init__( def _add_macro_to( self, hierarchy: Dict[str, FlatNamespace], - macro: ParsedMacro, + macro: Macro, macro_func: MacroGenerator, ): if macro.package_name in hierarchy: @@ -122,10 +122,10 @@ def _add_macro_to( hierarchy[macro.package_name] = namespace if macro.name in namespace: - raise_duplicate_macro_name(macro_func.macro, macro, macro.package_name) + raise DuplicateMacroName(macro_func.macro, macro, macro.package_name) hierarchy[macro.package_name][macro.name] = macro_func - def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]): + def add_macro(self, macro: Macro, ctx: Dict[str, Any]): macro_name: str = macro.name # MacroGenerator is in clients/jinja.py @@ -147,13 +147,11 @@ def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]): elif macro.package_name == self.root_package: self.globals[macro_name] = macro_func - def add_macros(self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any]): + def add_macros(self, macros: Iterable[Macro], ctx: Dict[str, Any]): for macro in macros: self.add_macro(macro, ctx) - def build_namespace( - self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any] - ) -> MacroNamespace: + def build_namespace(self, macros: Iterable[Macro], ctx: Dict[str, Any]) -> MacroNamespace: self.add_macros(macros, ctx) # Iterate in reverse-order and overwrite: the packages that are first diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 597b526e384..2e7af0a79f2 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -19,46 +19,50 @@ from dbt.clients import agate_helper from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack from dbt.config import RuntimeConfig, Project -from .base import contextmember, contextproperty, Var -from .configured import FQNLookup -from .context_config import ContextConfig from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER +from dbt.context.base import contextmember, contextproperty, Var +from dbt.context.configured import FQNLookup +from dbt.context.context_config import ContextConfig +from dbt.context.exceptions_jinja import wrapped_exports from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace -from .macros import MacroNamespaceBuilder, MacroNamespace -from .manifest import ManifestContext +from dbt.context.macros import MacroNamespaceBuilder, MacroNamespace +from dbt.context.manifest import ManifestContext from dbt.contracts.connection import AdapterResponse from dbt.contracts.graph.manifest import Manifest, Disabled -from dbt.contracts.graph.compiled import ( - CompiledResource, - CompiledSeedNode, +from dbt.contracts.graph.nodes import ( + Macro, + Exposure, + Metric, + SeedNode, + SourceDefinition, + Resource, ManifestNode, ) -from dbt.contracts.graph.parsed import ( - ParsedMacro, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, - ParsedSourceDefinition, -) from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference from 
dbt.events.functions import get_metadata_vars from dbt.exceptions import ( CompilationException, - ParsingException, + ConflictingConfigKeys, + DisallowSecretEnvVar, + EnvVarMissing, InternalException, - ValidationException, + InvalidInlineModelConfig, + InvalidNumberSourceArgs, + InvalidPersistDocsValueType, + LoadAgateTableNotSeed, + LoadAgateTableValueError, + MacroInvalidDispatchArg, + MacrosSourcesUnWriteable, + MetricInvalidArgs, + MissingConfig, + OperationsCannotRefEphemeralNodes, + PackageNotInDeps, + ParsingException, + RefBadContext, + RefInvalidArgs, RuntimeException, - macro_invalid_dispatch_arg, - missing_config, - raise_compiler_error, - ref_invalid_args, - metric_invalid_args, - ref_target_not_found, - target_not_found, - ref_bad_context, - wrapped_exports, - raise_parsing_error, - disallow_secret_env_var, + TargetNotFound, + ValidationException, ) from dbt.config import IsFQNResource from dbt.node_types import NodeType, ModelLanguage @@ -143,7 +147,7 @@ def dispatch( raise CompilationException(msg) if packages is not None: - raise macro_invalid_dispatch_arg(macro_name) + raise MacroInvalidDispatchArg(macro_name) namespace = macro_namespace @@ -237,7 +241,7 @@ def __call__(self, *args: str) -> RelationProxy: elif len(args) == 2: package, name = args else: - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -261,9 +265,7 @@ def validate_args(self, source_name: str, table_name: str): def __call__(self, *args: str) -> RelationProxy: if len(args) != 2: - raise_compiler_error( - f"source() takes exactly two arguments ({len(args)} given)", self.model - ) + raise InvalidNumberSourceArgs(args, node=self.model) self.validate_args(args[0], args[1]) return self.resolve(args[0], args[1]) @@ -298,7 +300,7 @@ def __call__(self, *args: str) -> MetricReference: elif len(args) == 2: package, name = args else: - metric_invalid_args(self.model, args) + raise MetricInvalidArgs(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -319,12 +321,7 @@ def _transform_config(self, config): if oldkey in config: newkey = oldkey.replace("_", "-") if newkey in config: - raise_compiler_error( - 'Invalid config, has conflicting keys "{}" and "{}"'.format( - oldkey, newkey - ), - self.model, - ) + raise ConflictingConfigKeys(oldkey, newkey, node=self.model) config[newkey] = config.pop(oldkey) return config @@ -334,7 +331,7 @@ def __call__(self, *args, **kwargs): elif len(args) == 0 and len(kwargs) > 0: opts = kwargs else: - raise_compiler_error("Invalid inline model config", self.model) + raise InvalidInlineModelConfig(node=self.model) opts = self._transform_config(opts) @@ -382,7 +379,7 @@ def _lookup(self, name, default=_MISSING): else: result = self.model.config.get(name, default) if result is _MISSING: - missing_config(self.model, name) + raise MissingConfig(unique_id=self.model.unique_id, name=name) return result def require(self, name, validator=None): @@ -404,20 +401,14 @@ def get(self, name, default=None, validator=None): def persist_relation_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise_compiler_error( - f"Invalid value provided for 'persist_docs'. 
Expected dict " - f"but received {type(persist_docs)}" - ) + raise InvalidPersistDocsValueType(persist_docs) return persist_docs.get("relation", False) def persist_column_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise_compiler_error( - f"Invalid value provided for 'persist_docs'. Expected dict " - f"but received {type(persist_docs)}" - ) + raise InvalidPersistDocsValueType(persist_docs) return persist_docs.get("columns", False) @@ -476,10 +467,11 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel ) if target_model is None or isinstance(target_model, Disabled): - ref_target_not_found( - self.model, - target_name, - target_package, + raise TargetNotFound( + node=self.model, + target_name=target_name, + target_kind="node", + target_package=target_package, disabled=isinstance(target_model, Disabled), ) self.validate(target_model, target_name, target_package) @@ -497,7 +489,7 @@ def validate( ) -> None: if resolved.unique_id not in self.model.depends_on.nodes: args = self._repack_args(target_name, target_package) - ref_bad_context(self.model, args) + raise RefBadContext(node=self.model, args=args) class OperationRefResolver(RuntimeRefResolver): @@ -512,13 +504,8 @@ def validate( def create_relation(self, target_model: ManifestNode, name: str) -> RelationProxy: if target_model.is_ephemeral_model: # In operations, we can't ref() ephemeral nodes, because - # ParsedMacros do not support set_cte - raise_compiler_error( - "Operations can not ref() ephemeral nodes, but {} is ephemeral".format( - target_model.name - ), - self.model, - ) + # Macros do not support set_cte + raise OperationsCannotRefEphemeralNodes(target_model.name, node=self.model) else: return super().create_relation(target_model, name) @@ -541,7 +528,7 @@ def resolve(self, source_name: str, table_name: str): ) if target_source is None or isinstance(target_source, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=f"{source_name}.{table_name}", target_kind="source", @@ -568,7 +555,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Met ) if target_metric is None or isinstance(target_metric, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=target_name, target_kind="metric", @@ -584,9 +571,9 @@ def __init__( self, context: Dict[str, Any], config: RuntimeConfig, - node: CompiledResource, + node: Resource, ) -> None: - self._node: CompiledResource + self._node: Resource self._config: RuntimeConfig = config super().__init__(context, config.cli_vars, node=node) @@ -597,7 +584,7 @@ def packages_for_node(self) -> Iterable[Project]: if package_name != self._config.project_name: if package_name not in dependencies: # I don't think this is actually reachable - raise_compiler_error(f"Node package named {package_name} not found!", self._node) + raise PackageNotInDeps(package_name, node=self._node) yield dependencies[package_name] yield self._config @@ -690,7 +677,7 @@ def __init__( raise InternalException(f"Invalid provider given to context: {provider}") # mypy appeasement - we know it'll be a RuntimeConfig self.config: RuntimeConfig - self.model: Union[ParsedMacro, ManifestNode] = model + self.model: Union[Macro, ManifestNode] = model super().__init__(config, manifest, model.package_name) self.sql_results: Dict[str, AttrDict] = {} self.context_config: Optional[ContextConfig] = context_config @@ -779,8 +766,8 @@ def inner(value: T) -> None: 
@contextmember def write(self, payload: str) -> str: # macros/source defs aren't 'writeable'. - if isinstance(self.model, (ParsedMacro, ParsedSourceDefinition)): - raise_compiler_error('cannot "write" macros or sources') + if isinstance(self.model, (Macro, SourceDefinition)): + raise MacrosSourcesUnWriteable(node=self.model) self.model.build_path = self.model.write_node(self.config.target_path, "run", payload) return "" @@ -795,20 +782,19 @@ def try_or_compiler_error( try: return func(*args, **kwargs) except Exception: - raise_compiler_error(message_if_exception, self.model) + raise CompilationException(message_if_exception, self.model) @contextmember def load_agate_table(self) -> agate.Table: - if not isinstance(self.model, (ParsedSeedNode, CompiledSeedNode)): - raise_compiler_error( - "can only load_agate_table for seeds (got a {})".format(self.model.resource_type) - ) + if not isinstance(self.model, SeedNode): + raise LoadAgateTableNotSeed(self.model.resource_type, node=self.model) + assert self.model.root_path path = os.path.join(self.model.root_path, self.model.original_file_path) column_types = self.model.config.column_types try: table = agate_helper.from_csv(path, text_columns=column_types) except ValueError as e: - raise_compiler_error(str(e)) + raise LoadAgateTableValueError(e, node=self.model) table.original_abspath = os.path.abspath(path) return table @@ -1210,7 +1196,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1219,7 +1205,13 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: if return_value is not None: # Save the env_var value in the manifest and the var name in the source_file. # If this is compiling, do not save because it's irrelevant to parsing. - if self.model and not hasattr(self.model, "compiled"): + compiling = ( + True + if hasattr(self.model, "compiled") + and getattr(self.model, "compiled", False) is True + else False + ) + if self.model and not compiling: # If the environment variable is set from a default, store a string indicating # that so we can skip partial parsing. Otherwise the file will be scheduled for # reparsing. 
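All four `env_var` implementations touched in this patch share the same control flow: reject `DBT_ENV_SECRET_` names, prefer the environment over the default, and raise a typed error when nothing is found. (The verbose `compiling` expression above also reduces to `getattr(self.model, "compiled", False) is True`.) The shared core, sketched with illustrative exception names:

import os
from typing import Optional

SECRET_ENV_PREFIX = "DBT_ENV_SECRET_"


class SecretEnvVarDisallowed(Exception):
    """Illustrative stand-in for dbt's DisallowSecretEnvVar."""


class EnvVarMissingError(Exception):
    """Illustrative stand-in for dbt's EnvVarMissing."""


def env_var(var: str, default: Optional[str] = None) -> str:
    if var.startswith(SECRET_ENV_PREFIX):
        raise SecretEnvVarDisallowed(var)
    if var in os.environ:
        return os.environ[var]
    if default is not None:
        return default
    raise EnvVarMissingError(var)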
If the default changes, the file will have been updated and therefore @@ -1237,8 +1229,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.env_vars.append(var) # type: ignore[union-attr] return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) @contextproperty def selected_resources(self) -> List[str]: @@ -1274,7 +1265,7 @@ class MacroContext(ProviderContext): def __init__( self, - model: ParsedMacro, + model: Macro, config: RuntimeConfig, manifest: Manifest, provider: Provider, @@ -1389,7 +1380,7 @@ def generate_parser_model_context( def generate_generate_name_macro_context( - macro: ParsedMacro, + macro: Macro, config: RuntimeConfig, manifest: Manifest, ) -> Dict[str, Any]: @@ -1407,7 +1398,7 @@ def generate_runtime_model_context( def generate_runtime_macro_context( - macro: ParsedMacro, + macro: Macro, config: RuntimeConfig, manifest: Manifest, package_name: Optional[str], @@ -1419,7 +1410,7 @@ def generate_runtime_macro_context( class ExposureRefResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.model.refs.append(list(args)) return "" @@ -1427,15 +1418,21 @@ def __call__(self, *args) -> str: class ExposureSourceResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) != 2: - raise_compiler_error( - f"source() takes exactly two arguments ({len(args)} given)", self.model - ) + raise InvalidNumberSourceArgs(args, node=self.model) self.model.sources.append(list(args)) return "" +class ExposureMetricResolver(BaseResolver): + def __call__(self, *args) -> str: + if len(args) not in (1, 2): + raise MetricInvalidArgs(node=self.model, args=args) + self.model.metrics.append(list(args)) + return "" + + def generate_parse_exposure( - exposure: ParsedExposure, + exposure: Exposure, config: RuntimeConfig, manifest: Manifest, package_name: str, @@ -1454,6 +1451,12 @@ def generate_parse_exposure( project, manifest, ), + "metric": ExposureMetricResolver( + None, + exposure, + project, + manifest, + ), } @@ -1465,7 +1468,7 @@ def __call__(self, *args) -> str: elif len(args) == 2: package, name = args else: - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.validate_args(name, package) self.model.refs.append(list(args)) return "" @@ -1479,7 +1482,7 @@ def validate_args(self, name, package): def generate_parse_metrics( - metric: ParsedMetric, + metric: Metric, config: RuntimeConfig, manifest: Manifest, package_name: str, @@ -1555,7 +1558,7 @@ def _build_test_namespace(self): def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1581,8 +1584,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr] return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) def generate_test_context( diff --git a/core/dbt/context/secret.py b/core/dbt/context/secret.py index 11a6dc54f07..da13509ef50 100644 --- a/core/dbt/context/secret.py +++ b/core/dbt/context/secret.py @@ -4,7 +4,7 @@ from .base import BaseContext, contextmember from dbt.constants import 
SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.exceptions import raise_parsing_error +from dbt.exceptions import EnvVarMissing SECRET_PLACEHOLDER = "$$$DBT_SECRET_START$$${}$$$DBT_SECRET_END$$$" @@ -50,8 +50,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: self.env_vars[var] = return_value if var in os.environ else DEFAULT_ENV_PLACEHOLDER return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) def generate_secret_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index a32bb443099..fe4ae912229 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -16,6 +16,7 @@ from dbt.utils import translate_aliases from dbt.events.functions import fire_event from dbt.events.types import NewConnectionOpening +from dbt.events.contextvars import get_node_info from typing_extensions import Protocol from dbt.dataclass_schema import ( dbtClassMixin, @@ -112,7 +113,9 @@ def __init__(self, opener: Callable[[Connection], Connection]): self.opener = opener def resolve(self, connection: Connection) -> Connection: - fire_event(NewConnectionOpening(connection_state=connection.state)) + fire_event( + NewConnectionOpening(connection_state=connection.state, node_info=get_node_info()) + ) return self.opener(connection) diff --git a/core/dbt/contracts/files.py b/core/dbt/contracts/files.py index b915a0d1197..93f12a1411e 100644 --- a/core/dbt/contracts/files.py +++ b/core/dbt/contracts/files.py @@ -1,18 +1,16 @@ import hashlib import os from dataclasses import dataclass, field + from mashumaro.types import SerializableType from typing import List, Optional, Union, Dict, Any +from dbt.constants import MAXIMUM_SEED_SIZE from dbt.dataclass_schema import dbtClassMixin, StrEnum from .util import SourceKey -MAXIMUM_SEED_SIZE = 1 * 1024 * 1024 -MAXIMUM_SEED_SIZE_NAME = "1MB" - - class ParseFileType(StrEnum): Macro = "macro" Model = "model" diff --git a/core/dbt/contracts/graph/compiled.py b/core/dbt/contracts/graph/compiled.py deleted file mode 100644 index 118d104f537..00000000000 --- a/core/dbt/contracts/graph/compiled.py +++ /dev/null @@ -1,235 +0,0 @@ -from dbt.contracts.graph.parsed import ( - HasTestMetadata, - ParsedNode, - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedExposure, - ParsedMetric, - ParsedResource, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, - ParsedSourceDefinition, - SeedConfig, - TestConfig, - same_seeds, -) -from dbt.node_types import NodeType -from dbt.contracts.util import Replaceable - -from dbt.dataclass_schema import dbtClassMixin -from dataclasses import dataclass, field -from typing import Optional, List, Union, Dict, Type - - -@dataclass -class InjectedCTE(dbtClassMixin, Replaceable): - id: str - sql: str - - -@dataclass -class CompiledNodeMixin(dbtClassMixin): - # this is a special mixin class to provide a required argument. If a node - # is missing a `compiled` flag entirely, it must not be a CompiledNode. 
- compiled: bool - - -@dataclass -class CompiledNode(ParsedNode, CompiledNodeMixin): - compiled_code: Optional[str] = None - extra_ctes_injected: bool = False - extra_ctes: List[InjectedCTE] = field(default_factory=list) - relation_name: Optional[str] = None - _pre_injected_sql: Optional[str] = None - - def set_cte(self, cte_id: str, sql: str): - """This is the equivalent of what self.extra_ctes[cte_id] = sql would - do if extra_ctes were an OrderedDict - """ - for cte in self.extra_ctes: - if cte.id == cte_id: - cte.sql = sql - break - else: - self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) - - def __post_serialize__(self, dct): - dct = super().__post_serialize__(dct) - if "_pre_injected_sql" in dct: - del dct["_pre_injected_sql"] - return dct - - -@dataclass -class CompiledAnalysisNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) - - -@dataclass -class CompiledHookNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) - index: Optional[int] = None - - -@dataclass -class CompiledModelNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) - - -# TODO: rm? -@dataclass -class CompiledRPCNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) - - -@dataclass -class CompiledSqlNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) - - -@dataclass -class CompiledSeedNode(CompiledNode): - # keep this in sync with ParsedSeedNode! - resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) - config: SeedConfig = field(default_factory=SeedConfig) - - @property - def empty(self): - """Seeds are never empty""" - return False - - def same_body(self, other) -> bool: - return same_seeds(self, other) - - -@dataclass -class CompiledSnapshotNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) - - -@dataclass -class CompiledSingularTestNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. - config: TestConfig = field(default_factory=TestConfig) # type:ignore - - -@dataclass -class CompiledGenericTestNode(CompiledNode, HasTestMetadata): - # keep this in sync with ParsedGenericTestNode! - resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) - column_name: Optional[str] = None - file_key_name: Optional[str] = None - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. 
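The `set_cte` method being deleted here (presumably preserved on the unified node classes in `nodes.py`) is the classic list-as-ordered-mapping update, built on Python's for/else. In isolation:

from dataclasses import dataclass
from typing import List


@dataclass
class InjectedCTE:
    id: str
    sql: str


def set_cte(extra_ctes: List[InjectedCTE], cte_id: str, sql: str) -> None:
    # Equivalent of `extra_ctes[cte_id] = sql` if extra_ctes were an
    # ordered mapping: update in place when the id exists, else append.
    for cte in extra_ctes:
        if cte.id == cte_id:
            cte.sql = sql
            break
    else:
        extra_ctes.append(InjectedCTE(id=cte_id, sql=sql))


ctes: List[InjectedCTE] = []
set_cte(ctes, "model.a", "with a as (select 1)")
set_cte(ctes, "model.a", "with a as (select 2)")
print(ctes)  # one entry, sql updated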
- config: TestConfig = field(default_factory=TestConfig) # type:ignore - - def same_contents(self, other) -> bool: - if other is None: - return False - - return self.same_config(other) and self.same_fqn(other) and True - - -CompiledTestNode = Union[CompiledSingularTestNode, CompiledGenericTestNode] - - -PARSED_TYPES: Dict[Type[CompiledNode], Type[ParsedResource]] = { - CompiledAnalysisNode: ParsedAnalysisNode, - CompiledModelNode: ParsedModelNode, - CompiledHookNode: ParsedHookNode, - CompiledRPCNode: ParsedRPCNode, - CompiledSqlNode: ParsedSqlNode, - CompiledSeedNode: ParsedSeedNode, - CompiledSnapshotNode: ParsedSnapshotNode, - CompiledSingularTestNode: ParsedSingularTestNode, - CompiledGenericTestNode: ParsedGenericTestNode, -} - - -COMPILED_TYPES: Dict[Type[ParsedResource], Type[CompiledNode]] = { - ParsedAnalysisNode: CompiledAnalysisNode, - ParsedModelNode: CompiledModelNode, - ParsedHookNode: CompiledHookNode, - ParsedRPCNode: CompiledRPCNode, - ParsedSqlNode: CompiledSqlNode, - ParsedSeedNode: CompiledSeedNode, - ParsedSnapshotNode: CompiledSnapshotNode, - ParsedSingularTestNode: CompiledSingularTestNode, - ParsedGenericTestNode: CompiledGenericTestNode, -} - - -# for some types, the compiled type is the parsed type, so make this easy -CompiledType = Union[Type[CompiledNode], Type[ParsedResource]] -CompiledResource = Union[ParsedResource, CompiledNode] - - -def compiled_type_for(parsed: ParsedNode) -> CompiledType: - if type(parsed) in COMPILED_TYPES: - return COMPILED_TYPES[type(parsed)] - else: - return type(parsed) - - -def parsed_instance_for(compiled: CompiledNode) -> ParsedResource: - cls = PARSED_TYPES.get(type(compiled)) - if cls is None: - # how??? - raise ValueError("invalid resource_type: {}".format(compiled.resource_type)) - - return cls.from_dict(compiled.to_dict(omit_none=True)) - - -NonSourceCompiledNode = Union[ - CompiledAnalysisNode, - CompiledSingularTestNode, - CompiledModelNode, - CompiledHookNode, - CompiledRPCNode, - CompiledSqlNode, - CompiledGenericTestNode, - CompiledSeedNode, - CompiledSnapshotNode, -] - -NonSourceParsedNode = Union[ - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, -] - - -# This is anything that can be in manifest.nodes. -ManifestNode = Union[ - NonSourceCompiledNode, - NonSourceParsedNode, -] - -# We allow either parsed or compiled nodes, or parsed sources, as some -# 'compile()' calls in the runner actually just return the original parsed -# node they were given. 
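The mappings above were the heart of the two-class scheme this file implemented: every Parsed* node had a Compiled* twin, and conversion between them was a dict round-trip through to_dict/from_dict. A minimal sketch of that conversion, using toy dataclasses rather than dbt's real node types:

    from dataclasses import dataclass, asdict
    from typing import Optional

    @dataclass
    class ToyParsed:
        name: str
        raw_code: str

    @dataclass
    class ToyCompiled(ToyParsed):
        compiled: bool = True
        compiled_code: Optional[str] = None

    # one-entry stand-in for the COMPILED_TYPES mapping above
    TOY_COMPILED_TYPES = {ToyParsed: ToyCompiled}

    def toy_compiled_type_for(parsed):
        # fall back to the node's own type, mirroring compiled_type_for above
        return TOY_COMPILED_TYPES.get(type(parsed), type(parsed))

    node = ToyParsed(name="my_model", raw_code="select 1")
    compiled = toy_compiled_type_for(node)(**asdict(node))
    assert isinstance(compiled, ToyCompiled) and compiled.compiled_code is None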
-CompileResultNode = Union[ - ManifestNode, - ParsedSourceDefinition, -] - -# anything that participates in the graph: sources, exposures, metrics, -# or manifest nodes -GraphMemberNode = Union[ - CompileResultNode, - ParsedExposure, - ParsedMetric, -] diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index a2d22e6e315..c43012ec521 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -16,29 +16,24 @@ TypeVar, Callable, Generic, - cast, AbstractSet, ClassVar, ) from typing_extensions import Protocol from uuid import UUID -from dbt.contracts.graph.compiled import ( - CompileResultNode, +from dbt.contracts.graph.nodes import ( + Macro, + Documentation, + SourceDefinition, + GenericTestNode, + Exposure, + Metric, + UnpatchedSourceDefinition, ManifestNode, - NonSourceCompiledNode, GraphMemberNode, -) -from dbt.contracts.graph.parsed import ( - ParsedMacro, - ParsedDocumentation, - ParsedSourceDefinition, - ParsedGenericTestNode, - ParsedExposure, - ParsedMetric, - HasUniqueID, - UnpatchedSourceDefinition, - ManifestNodes, + ResultNode, + BaseNode, ) from dbt.contracts.graph.unparsed import SourcePatch from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile @@ -46,14 +41,14 @@ from dbt.dataclass_schema import dbtClassMixin from dbt.exceptions import ( CompilationException, - raise_duplicate_resource_name, - raise_compiler_error, + DuplicateResourceName, + DuplicateMacroInPackage, + DuplicateMaterializationName, ) from dbt.helper_types import PathSet from dbt.events.functions import fire_event from dbt.events.types import MergedFromState from dbt.node_types import NodeType -from dbt.ui import line_wrap_message from dbt import flags from dbt import tracking import dbt.utils @@ -96,7 +91,7 @@ def find(self, key, package: Optional[PackageName], manifest: "Manifest"): return self.perform_lookup(unique_id, manifest) return None - def add_doc(self, doc: ParsedDocumentation): + def add_doc(self, doc: Documentation): if doc.name not in self.storage: self.storage[doc.name] = {} self.storage[doc.name][doc.package_name] = doc.unique_id @@ -105,7 +100,7 @@ def populate(self, manifest): for doc in manifest.docs.values(): self.add_doc(doc) - def perform_lookup(self, unique_id: UniqueID, manifest) -> ParsedDocumentation: + def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation: if unique_id not in manifest.docs: raise dbt.exceptions.InternalException( f"Doc {unique_id} found in cache but not found in manifest" @@ -127,7 +122,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest" return self.perform_lookup(unique_id, manifest) return None - def add_source(self, source: ParsedSourceDefinition): + def add_source(self, source: SourceDefinition): if source.search_name not in self.storage: self.storage[source.search_name] = {} @@ -138,7 +133,7 @@ def populate(self, manifest): if hasattr(source, "source_name"): self.add_source(source) - def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedSourceDefinition: + def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition: if unique_id not in manifest.sources: raise dbt.exceptions.InternalException( f"Source {unique_id} found in cache but not found in manifest" @@ -198,7 +193,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest" return self.perform_lookup(unique_id, manifest) return None - def add_metric(self, metric: ParsedMetric): 
+ def add_metric(self, metric: Metric): if metric.search_name not in self.storage: self.storage[metric.search_name] = {} @@ -209,7 +204,7 @@ def populate(self, manifest): if hasattr(metric, "name"): self.add_metric(metric) - def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedMetric: + def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric: if unique_id not in manifest.metrics: raise dbt.exceptions.InternalException( f"Metric {unique_id} found in cache but not found in manifest" @@ -325,7 +320,7 @@ def _sort_values(dct): def build_node_edges(nodes: List[ManifestNode]): - """Build the forward and backward edges on the given list of ParsedNodes + """Build the forward and backward edges on the given list of ManifestNodes and return them as two separate dictionaries, each mapping unique IDs to lists of edges. """ @@ -343,10 +338,10 @@ def build_node_edges(nodes: List[ManifestNode]): # Build a map of children of macros and generic tests def build_macro_edges(nodes: List[Any]): forward_edges: Dict[str, List[str]] = { - n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on.macros + n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on_macros } for node in nodes: - for unique_id in node.depends_on.macros: + for unique_id in node.depends_on_macros: if unique_id in forward_edges.keys(): forward_edges[unique_id].append(node.unique_id) return _sort_values(forward_edges) @@ -365,7 +360,7 @@ class Locality(enum.IntEnum): @dataclass class MacroCandidate: locality: Locality - macro: ParsedMacro + macro: Macro def __eq__(self, other: object) -> bool: if not isinstance(other, MacroCandidate): @@ -403,12 +398,7 @@ def __eq__(self, other: object) -> bool: return NotImplemented equal = self.specificity == other.specificity and self.locality == other.locality if equal: - raise_compiler_error( - "Found two materializations with the name {} (packages {} and " - "{}). dbt cannot resolve this ambiguity".format( - self.macro.name, self.macro.package_name, other.macro.package_name - ) - ) + raise DuplicateMaterializationName(self.macro, other) return equal @@ -430,16 +420,14 @@ def __lt__(self, other: object) -> bool: class CandidateList(List[M]): - def last(self) -> Optional[ParsedMacro]: + def last(self) -> Optional[Macro]: if not self: return None self.sort() return self[-1].macro -def _get_locality( - macro: ParsedMacro, root_project_name: str, internal_packages: Set[str] -) -> Locality: +def _get_locality(macro: Macro, root_project_name: str, internal_packages: Set[str]) -> Locality: if macro.package_name == root_project_name: return Locality.Root elif macro.package_name in internal_packages: @@ -465,16 +453,16 @@ class Disabled(Generic[D]): target: D -MaybeMetricNode = Optional[Union[ParsedMetric, Disabled[ParsedMetric]]] +MaybeMetricNode = Optional[Union[Metric, Disabled[Metric]]] -MaybeDocumentation = Optional[ParsedDocumentation] +MaybeDocumentation = Optional[Documentation] MaybeParsedSource = Optional[ Union[ - ParsedSourceDefinition, - Disabled[ParsedSourceDefinition], + SourceDefinition, + Disabled[SourceDefinition], ] ] @@ -514,7 +502,7 @@ def __init__(self): def find_macro_by_name( self, name: str, root_project_name: str, package: Optional[str] - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: """Find a macro in the graph by its name and package name, or None for any package. 
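A note on the candidate machinery above: candidates sort by locality (and, for materializations, specificity), so CandidateList.last() returns the highest-priority macro. A hedged toy of that ordering, assuming the usual Core < Imported < Root ranking; the full Locality enum and the __lt__ implementation are elided from this hunk:

    import enum
    from dataclasses import dataclass, field

    class Locality(enum.IntEnum):  # assumed values; only Root and Core appear above
        Core = 1
        Imported = 2
        Root = 3

    @dataclass(order=True)
    class ToyCandidate:
        locality: Locality
        name: str = field(compare=False)

    candidates = [
        ToyCandidate(Locality.Core, "dbt.generate_schema_name"),
        ToyCandidate(Locality.Root, "my_project.generate_schema_name"),
    ]
    candidates.sort()
    assert candidates[-1].name == "my_project.generate_schema_name"  # root-project override wins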
The root project name is used to determine priority: - locally defined macros come first @@ -537,7 +525,7 @@ def filter(candidate: MacroCandidate) -> bool: def find_generate_macro_by_name( self, component: str, root_project_name: str - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: """ The `generate_X_name` macros are similar to regular ones, but ignore imported packages. @@ -606,11 +594,11 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin): # is added it must all be added in the __reduce_ex__ method in the # args tuple in the right position. nodes: MutableMapping[str, ManifestNode] = field(default_factory=dict) - sources: MutableMapping[str, ParsedSourceDefinition] = field(default_factory=dict) - macros: MutableMapping[str, ParsedMacro] = field(default_factory=dict) - docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict) - exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict) - metrics: MutableMapping[str, ParsedMetric] = field(default_factory=dict) + sources: MutableMapping[str, SourceDefinition] = field(default_factory=dict) + macros: MutableMapping[str, Macro] = field(default_factory=dict) + docs: MutableMapping[str, Documentation] = field(default_factory=dict) + exposures: MutableMapping[str, Exposure] = field(default_factory=dict) + metrics: MutableMapping[str, Metric] = field(default_factory=dict) selectors: MutableMapping[str, Any] = field(default_factory=dict) files: MutableMapping[str, AnySourceFile] = field(default_factory=dict) metadata: ManifestMetadata = field(default_factory=ManifestMetadata) @@ -658,7 +646,7 @@ def __post_deserialize__(cls, obj): obj._lock = flags.MP_CONTEXT.Lock() return obj - def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiledNode: + def sync_update_node(self, new_node: ManifestNode) -> ManifestNode: """update the node with a lock. 
The only time we should want to lock is when compiling an ephemeral ancestor of a node at runtime, because multiple threads could be just-in-time compiling the same ephemeral @@ -671,21 +659,21 @@ def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiled with self._lock: existing = self.nodes[new_node.unique_id] if getattr(existing, "compiled", False): - # already compiled -> must be a NonSourceCompiledNode - return cast(NonSourceCompiledNode, existing) + # already compiled + return existing _update_into(self.nodes, new_node) return new_node - def update_exposure(self, new_exposure: ParsedExposure): + def update_exposure(self, new_exposure: Exposure): _update_into(self.exposures, new_exposure) - def update_metric(self, new_metric: ParsedMetric): + def update_metric(self, new_metric: Metric): _update_into(self.metrics, new_metric) def update_node(self, new_node: ManifestNode): _update_into(self.nodes, new_node) - def update_source(self, new_source: ParsedSourceDefinition): + def update_source(self, new_source: SourceDefinition): _update_into(self.sources, new_source) def build_flat_graph(self): @@ -738,7 +726,7 @@ def _materialization_candidates_for( def find_materialization_macro_by_name( self, project_name: str, materialization_name: str, adapter_type: str - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: candidates: CandidateList = CandidateList( chain.from_iterable( self._materialization_candidates_for( @@ -943,8 +931,8 @@ def resolve_source( search_name = f"{target_source_name}.{target_table_name}" candidates = _search_packages(current_project, node_package) - source: Optional[ParsedSourceDefinition] = None - disabled: Optional[List[ParsedSourceDefinition]] = None + source: Optional[SourceDefinition] = None + disabled: Optional[List[SourceDefinition]] = None for pkg in candidates: source = self.source_lookup.find(search_name, pkg, self) @@ -968,8 +956,8 @@ def resolve_metric( node_package: str, ) -> MaybeMetricNode: - metric: Optional[ParsedMetric] = None - disabled: Optional[List[ParsedMetric]] = None + metric: Optional[Metric] = None + disabled: Optional[List[Metric]] = None candidates = _search_packages(current_project, node_package, target_metric_package) for pkg in candidates: @@ -992,7 +980,7 @@ def resolve_doc( package: Optional[str], current_project: str, node_package: str, - ) -> Optional[ParsedDocumentation]: + ) -> Optional[Documentation]: """Resolve the given documentation. This follows the same algorithm as resolve_ref except the is_enabled checks are unnecessary as docs are always enabled. @@ -1011,6 +999,7 @@ def merge_from_artifact( adapter, other: "WritableManifest", selected: AbstractSet[UniqueID], + favor_state: bool = False, ) -> None: """Given the selected unique IDs and a writable manifest, update this manifest by replacing any unselected nodes with their counterpart. 
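The hunk that follows is where the new favor_state parameter takes effect. As a hedged restatement of that condition (illustrative names, not dbt's API): an unselected, non-ephemeral refable node is deferred to the state manifest when no relation for it exists in the warehouse, or unconditionally when favor_state is set.

    def should_defer_to_state(node, selected, relation_exists, favor_state=False):
        # mirrors the condition in the hunk below; node is anything exposing
        # is_refable, is_ephemeral, and unique_id
        return (
            node.is_refable
            and not node.is_ephemeral
            and node.unique_id not in selected
            and (not relation_exists or favor_state)
        )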
@@ -1025,7 +1014,10 @@ def merge_from_artifact( node.resource_type in refables and not node.is_ephemeral and unique_id not in selected - and not adapter.get_relation(current.database, current.schema, current.identifier) + and ( + not adapter.get_relation(current.database, current.schema, current.identifier) + or favor_state + ) ): merged.add(unique_id) self.nodes[unique_id] = node.replace(deferred=True) @@ -1040,29 +1032,10 @@ def merge_from_artifact( # Methods that were formerly in ParseResult - def add_macro(self, source_file: SourceFile, macro: ParsedMacro): + def add_macro(self, source_file: SourceFile, macro: Macro): if macro.unique_id in self.macros: # detect that the macro exists and emit an error - other_path = self.macros[macro.unique_id].original_file_path - # subtract 2 for the "Compilation Error" indent - # note that the line wrap eats newlines, so if you want newlines, - # this is the result :( - msg = line_wrap_message( - f"""\ - dbt found two macros named "{macro.name}" in the project - "{macro.package_name}". - - - To fix this error, rename or remove one of the following - macros: - - - {macro.original_file_path} - - - {other_path} - """, - subtract=2, - ) - raise_compiler_error(msg) + raise DuplicateMacroInPackage(macro=macro, macro_mapping=self.macros) self.macros[macro.unique_id] = macro source_file.macros.append(macro.unique_id) @@ -1082,30 +1055,30 @@ def add_source(self, source_file: SchemaSourceFile, source: UnpatchedSourceDefin self.sources[source.unique_id] = source # type: ignore source_file.sources.append(source.unique_id) - def add_node_nofile(self, node: ManifestNodes): + def add_node_nofile(self, node: ManifestNode): # nodes can't be overwritten! _check_duplicates(node, self.nodes) self.nodes[node.unique_id] = node - def add_node(self, source_file: AnySourceFile, node: ManifestNodes, test_from=None): + def add_node(self, source_file: AnySourceFile, node: ManifestNode, test_from=None): self.add_node_nofile(node) if isinstance(source_file, SchemaSourceFile): - if isinstance(node, ParsedGenericTestNode): + if isinstance(node, GenericTestNode): assert test_from source_file.add_test(node.unique_id, test_from) - if isinstance(node, ParsedMetric): + if isinstance(node, Metric): source_file.metrics.append(node.unique_id) - if isinstance(node, ParsedExposure): + if isinstance(node, Exposure): source_file.exposures.append(node.unique_id) else: source_file.nodes.append(node.unique_id) - def add_exposure(self, source_file: SchemaSourceFile, exposure: ParsedExposure): + def add_exposure(self, source_file: SchemaSourceFile, exposure: Exposure): _check_duplicates(exposure, self.exposures) self.exposures[exposure.unique_id] = exposure source_file.exposures.append(exposure.unique_id) - def add_metric(self, source_file: SchemaSourceFile, metric: ParsedMetric): + def add_metric(self, source_file: SchemaSourceFile, metric: Metric): _check_duplicates(metric, self.metrics) self.metrics[metric.unique_id] = metric source_file.metrics.append(metric.unique_id) @@ -1117,20 +1090,20 @@ def add_disabled_nofile(self, node: GraphMemberNode): else: self.disabled[node.unique_id] = [node] - def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None): + def add_disabled(self, source_file: AnySourceFile, node: ResultNode, test_from=None): self.add_disabled_nofile(node) if isinstance(source_file, SchemaSourceFile): - if isinstance(node, ParsedGenericTestNode): + if isinstance(node, GenericTestNode): assert test_from source_file.add_test(node.unique_id, test_from) - 
if isinstance(node, ParsedMetric): + if isinstance(node, Metric): source_file.metrics.append(node.unique_id) - if isinstance(node, ParsedExposure): + if isinstance(node, Exposure): source_file.exposures.append(node.unique_id) else: source_file.nodes.append(node.unique_id) - def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation): + def add_doc(self, source_file: SourceFile, doc: Documentation): _check_duplicates(doc, self.docs) self.docs[doc.unique_id] = doc source_file.docs.append(doc.unique_id) @@ -1183,32 +1156,32 @@ def __init__(self, macros): @dataclass -@schema_version("manifest", 7) +@schema_version("manifest", 8) class WritableManifest(ArtifactMixin): nodes: Mapping[UniqueID, ManifestNode] = field( metadata=dict(description=("The nodes defined in the dbt project and its dependencies")) ) - sources: Mapping[UniqueID, ParsedSourceDefinition] = field( + sources: Mapping[UniqueID, SourceDefinition] = field( metadata=dict(description=("The sources defined in the dbt project and its dependencies")) ) - macros: Mapping[UniqueID, ParsedMacro] = field( + macros: Mapping[UniqueID, Macro] = field( metadata=dict(description=("The macros defined in the dbt project and its dependencies")) ) - docs: Mapping[UniqueID, ParsedDocumentation] = field( + docs: Mapping[UniqueID, Documentation] = field( metadata=dict(description=("The docs defined in the dbt project and its dependencies")) ) - exposures: Mapping[UniqueID, ParsedExposure] = field( + exposures: Mapping[UniqueID, Exposure] = field( metadata=dict( description=("The exposures defined in the dbt project and its dependencies") ) ) - metrics: Mapping[UniqueID, ParsedMetric] = field( + metrics: Mapping[UniqueID, Metric] = field( metadata=dict(description=("The metrics defined in the dbt project and its dependencies")) ) selectors: Mapping[UniqueID, Any] = field( metadata=dict(description=("The selectors defined in selectors.yml")) ) - disabled: Optional[Mapping[UniqueID, List[CompileResultNode]]] = field( + disabled: Optional[Mapping[UniqueID, List[ResultNode]]] = field( metadata=dict(description="A mapping of the disabled nodes in the target") ) parent_map: Optional[NodeEdgeMap] = field( @@ -1229,7 +1202,7 @@ class WritableManifest(ArtifactMixin): @classmethod def compatible_previous_versions(self): - return [("manifest", 4), ("manifest", 5), ("manifest", 6)] + return [("manifest", 4), ("manifest", 5), ("manifest", 6), ("manifest", 7)] def __post_serialize__(self, dct): for unique_id, node in dct["nodes"].items(): @@ -1238,9 +1211,9 @@ def __post_serialize__(self, dct): return dct -def _check_duplicates(value: HasUniqueID, src: Mapping[str, HasUniqueID]): +def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): if value.unique_id in src: - raise_duplicate_resource_name(value, src[value.unique_id]) + raise DuplicateResourceName(value, src[value.unique_id]) K_T = TypeVar("K_T") diff --git a/core/dbt/contracts/graph/metrics.py b/core/dbt/contracts/graph/metrics.py index 20222b4a32b..b895aa5e2f5 100644 --- a/core/dbt/contracts/graph/metrics.py +++ b/core/dbt/contracts/graph/metrics.py @@ -12,7 +12,7 @@ def __str__(self): class ResolvedMetricReference(MetricReference): """ - Simple proxy over a ParsedMetric which delegates property + Simple proxy over a Metric which delegates property lookups to the underlying node. Also adds helper functions for working with metrics (ie. 
__str__ and templating functions) """ diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/nodes.py similarity index 65% rename from core/dbt/contracts/graph/parsed.py rename to core/dbt/contracts/graph/nodes.py index 860f3fdf662..033318a34c1 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/nodes.py @@ -2,7 +2,6 @@ import time from dataclasses import dataclass, field from mashumaro.types import SerializableType -from pathlib import Path from typing import ( Optional, Union, @@ -12,19 +11,15 @@ Sequence, Tuple, Iterator, - TypeVar, ) from dbt.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin from dbt.clients.system import write_file -from dbt.contracts.files import FileHash, MAXIMUM_SEED_SIZE_NAME +from dbt.contracts.files import FileHash from dbt.contracts.graph.unparsed import ( - UnparsedNode, - UnparsedDocumentation, Quoting, Docs, - UnparsedBaseNode, FreshnessThreshold, ExternalTable, HasYamlMetadata, @@ -41,7 +36,14 @@ ) from dbt.contracts.util import Replaceable, AdditionalPropertiesMixin from dbt.events.proto_types import NodeInfo -from dbt.exceptions import warn_or_error +from dbt.events.functions import warn_or_error +from dbt.events.types import ( + SeedIncreased, + SeedExceedsLimitSamePath, + SeedExceedsLimitAndPathChanged, + SeedExceedsLimitChecksumChanged, +) +from dbt.events.contextvars import set_contextvars from dbt import flags from dbt.node_types import ModelLanguage, NodeType @@ -57,50 +59,97 @@ SnapshotConfig, ) +# ===================================================================== +# This contains the classes for all of the nodes and node-like objects +# in the manifest. In the "nodes" dictionary of the manifest we find +# all of the objects in the ManifestNode union below. In addition the +# manifest contains "macros", "sources", "metrics", "exposures", "docs", +# and "disabled" dictionaries. +# +# The SeedNode is a ManifestNode, but can't be compiled because it has +# no SQL. +# +# All objects defined in this file should have BaseNode as a parent +# class. +# +# The two objects which do not show up in the DAG are Macro and +# Documentation. 
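Given this taxonomy, a hypothetical helper (not a dbt API) makes the DAG boundary concrete: the four collections below are exactly what the GraphMemberNode union at the bottom of this file covers, while macros and docs stay out.

    def iter_graph_members(manifest):
        yield from manifest.nodes.values()      # ManifestNode, including SeedNode
        yield from manifest.sources.values()    # SourceDefinition
        yield from manifest.exposures.values()  # Exposure
        yield from manifest.metrics.values()    # Metric
        # manifest.macros and manifest.docs are deliberately excluded:
        # Macro and Documentation never appear in the DAG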
+# ===================================================================== + + +# ================================================== +# Various parent classes and node attribute classes +# ================================================== + @dataclass -class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable): +class BaseNode(dbtClassMixin, Replaceable): + """All nodes or node-like objects in this file should have this as a base class""" + name: str - description: str = "" - meta: Dict[str, Any] = field(default_factory=dict) - data_type: Optional[str] = None - quote: Optional[bool] = None - tags: List[str] = field(default_factory=list) - _extra: Dict[str, Any] = field(default_factory=dict) + resource_type: NodeType + package_name: str + path: str + original_file_path: str + unique_id: str + @property + def search_name(self): + return self.name -@dataclass -class HasFqn(dbtClassMixin, Replaceable): - fqn: List[str] + @property + def file_id(self): + return f"{self.package_name}://{self.original_file_path}" - def same_fqn(self, other: "HasFqn") -> bool: - return self.fqn == other.fqn + @property + def is_refable(self): + return self.resource_type in NodeType.refable() + @property + def should_store_failures(self): + return False -@dataclass -class HasUniqueID(dbtClassMixin, Replaceable): - unique_id: str + # will this node map to an object in the database? + @property + def is_relational(self): + return self.resource_type in NodeType.refable() + + @property + def is_ephemeral(self): + return self.config.materialized == "ephemeral" + + @property + def is_ephemeral_model(self): + return self.is_refable and self.is_ephemeral + + def get_materialization(self): + return self.config.materialized @dataclass -class MacroDependsOn(dbtClassMixin, Replaceable): - macros: List[str] = field(default_factory=list) +class GraphNode(BaseNode): + """Nodes in the DAG. Macro and Documentation don't have fqn.""" - # 'in' on lists is O(n) so this is O(n^2) for # of macros - def add_macro(self, value: str): - if value not in self.macros: - self.macros.append(value) + fqn: List[str] + + def same_fqn(self, other) -> bool: + return self.fqn == other.fqn @dataclass -class DependsOn(MacroDependsOn): - nodes: List[str] = field(default_factory=list) +class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable): + """Used in all ManifestNodes and SourceDefinition""" - def add_node(self, value: str): - if value not in self.nodes: - self.nodes.append(value) + name: str + description: str = "" + meta: Dict[str, Any] = field(default_factory=dict) + data_type: Optional[str] = None + quote: Optional[bool] = None + tags: List[str] = field(default_factory=list) + _extra: Dict[str, Any] = field(default_factory=dict) +# Metrics, exposures, @dataclass class HasRelationMetadata(dbtClassMixin, Replaceable): database: Optional[str] @@ -117,57 +166,29 @@ def __pre_deserialize__(cls, data): return data -class ParsedNodeMixins(dbtClassMixin): - resource_type: NodeType - depends_on: DependsOn - config: NodeConfig - - @property - def is_refable(self): - return self.resource_type in NodeType.refable() - - @property - def should_store_failures(self): - return self.resource_type == NodeType.Test and ( - self.config.store_failures - if self.config.store_failures is not None - else flags.STORE_FAILURES - ) - - # will this node map to an object in the database? 
- @property - def is_relational(self): - return self.resource_type in NodeType.refable() or self.should_store_failures +@dataclass +class MacroDependsOn(dbtClassMixin, Replaceable): + """Used only in the Macro class""" - @property - def is_ephemeral(self): - return self.config.materialized == "ephemeral" + macros: List[str] = field(default_factory=list) - @property - def is_ephemeral_model(self): - return self.is_refable and self.is_ephemeral + # 'in' on lists is O(n) so this is O(n^2) for # of macros + def add_macro(self, value: str): + if value not in self.macros: + self.macros.append(value) - @property - def depends_on_nodes(self): - return self.depends_on.nodes - def patch(self, patch: "ParsedNodePatch"): - """Given a ParsedNodePatch, add the new information to the node.""" - # explicitly pick out the parts to update so we don't inadvertently - # step on the model name or anything - # Note: config should already be updated - self.patch_path: Optional[str] = patch.file_id - # update created_at so process_docs will run in partial parsing - self.created_at = time.time() - self.description = patch.description - self.columns = patch.columns +@dataclass +class DependsOn(MacroDependsOn): + nodes: List[str] = field(default_factory=list) - def get_materialization(self): - return self.config.materialized + def add_node(self, value: str): + if value not in self.nodes: + self.nodes.append(value) @dataclass -class ParsedNodeMandatory(UnparsedNode, HasUniqueID, HasFqn, HasRelationMetadata, Replaceable): +class ParsedNodeMandatory(GraphNode, HasRelationMetadata, Replaceable): alias: str checksum: FileHash config: NodeConfig = field(default_factory=NodeConfig) @@ -177,6 +198,8 @@ def identifier(self): return self.alias +# This needs to be in all ManifestNodes and also in SourceDefinition, +# because of "source freshness" @dataclass class NodeInfoMixin: _event_status: Dict[str, Any] = field(default_factory=dict) @@ -196,25 +219,30 @@ def node_info(self): node_info_msg = NodeInfo(**node_info) return node_info_msg + def update_event_status(self, **kwargs): + for k, v in kwargs.items(): + self._event_status[k] = v + set_contextvars(node_info=self.node_info) + + def clear_event_status(self): + self._event_status = dict() + @dataclass -class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory): +class ParsedNode(NodeInfoMixin, ParsedNodeMandatory, SerializableType): tags: List[str] = field(default_factory=list) - refs: List[List[str]] = field(default_factory=list) - sources: List[List[str]] = field(default_factory=list) - metrics: List[List[str]] = field(default_factory=list) - depends_on: DependsOn = field(default_factory=DependsOn) description: str = field(default="") columns: Dict[str, ColumnInfo] = field(default_factory=dict) meta: Dict[str, Any] = field(default_factory=dict) docs: Docs = field(default_factory=Docs) patch_path: Optional[str] = None - compiled_path: Optional[str] = None build_path: Optional[str] = None deferred: bool = False unrendered_config: Dict[str, Any] = field(default_factory=dict) created_at: float = field(default_factory=lambda: time.time()) config_call_dict: Dict[str, Any] = field(default_factory=dict) + relation_name: Optional[str] = None + raw_code: str = "" def write_node(self, target_path: str, subdirectory: str, payload: str): if os.path.basename(self.path) == os.path.basename(self.original_file_path): @@ -228,12 +256,6 @@ def write_node(self, target_path: str, subdirectory: str, payload: str): write_file(full_path, payload) return full_path - -T = TypeVar("T", 
bound="ParsedNode") - - -@dataclass -class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType): def _serialize(self): return self.to_dict() @@ -250,26 +272,26 @@ def _deserialize(cls, dct: Dict[str, int]): # between them. resource_type = dct["resource_type"] if resource_type == "model": - return ParsedModelNode.from_dict(dct) + return ModelNode.from_dict(dct) elif resource_type == "analysis": - return ParsedAnalysisNode.from_dict(dct) + return AnalysisNode.from_dict(dct) elif resource_type == "seed": - return ParsedSeedNode.from_dict(dct) + return SeedNode.from_dict(dct) elif resource_type == "rpc": - return ParsedRPCNode.from_dict(dct) + return RPCNode.from_dict(dct) elif resource_type == "sql": - return ParsedSqlNode.from_dict(dct) + return SqlNode.from_dict(dct) elif resource_type == "test": if "test_metadata" in dct: - return ParsedGenericTestNode.from_dict(dct) + return GenericTestNode.from_dict(dct) else: - return ParsedSingularTestNode.from_dict(dct) + return SingularTestNode.from_dict(dct) elif resource_type == "operation": - return ParsedHookNode.from_dict(dct) + return HookNode.from_dict(dct) elif resource_type == "seed": - return ParsedSeedNode.from_dict(dct) + return SeedNode.from_dict(dct) elif resource_type == "snapshot": - return ParsedSnapshotNode.from_dict(dct) + return SnapshotNode.from_dict(dct) else: return cls.from_dict(dct) @@ -285,10 +307,7 @@ def _persist_relation_docs(self) -> bool: return bool(self.config.persist_docs.get("relation")) return False - def same_body(self: T, other: T) -> bool: - return self.raw_code == other.raw_code - - def same_persisted_description(self: T, other: T) -> bool: + def same_persisted_description(self, other) -> bool: # the check on configs will handle the case where we have different # persist settings, so we only have to care about the cases where they # are the same.. @@ -305,7 +324,10 @@ def same_persisted_description(self: T, other: T) -> bool: return True - def same_database_representation(self, other: T) -> bool: + def same_body(self, other) -> bool: + return self.raw_code == other.raw_code + + def same_database_representation(self, other) -> bool: # compare the config representation, not the node's config value. 
This # compares the configured value, rather than the ultimate value (so # generate_*_name and unset values derived from the target are @@ -318,13 +340,24 @@ def same_database_representation(self, other: T) -> bool: return False return True - def same_config(self, old: T) -> bool: + def same_config(self, old) -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self: T, old: Optional[T]) -> bool: + def patch(self, patch: "ParsedNodePatch"): + """Given a ParsedNodePatch, add the new information to the node.""" + # explicitly pick out the parts to update so we don't inadvertently + # step on the model name or anything + # Note: config should already be updated + self.patch_path: Optional[str] = patch.file_id + # update created_at so process_docs will run in partial parsing + self.created_at = time.time() + self.description = patch.description + self.columns = patch.columns + + def same_contents(self, old) -> bool: if old is None: return False @@ -339,102 +372,198 @@ def same_contents(self: T, old: Optional[T]) -> bool: @dataclass -class ParsedAnalysisNode(ParsedNode): +class InjectedCTE(dbtClassMixin, Replaceable): + """Used in CompiledNodes as part of ephemeral model processing""" + + id: str + sql: str + + +@dataclass +class CompiledNode(ParsedNode): + """Contains attributes necessary for SQL files and nodes with refs, sources, etc, + so all ManifestNodes except SeedNode.""" + + language: str = "sql" + refs: List[List[str]] = field(default_factory=list) + sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) + depends_on: DependsOn = field(default_factory=DependsOn) + compiled_path: Optional[str] = None + compiled: bool = False + compiled_code: Optional[str] = None + extra_ctes_injected: bool = False + extra_ctes: List[InjectedCTE] = field(default_factory=list) + _pre_injected_sql: Optional[str] = None + + @property + def empty(self): + return not self.raw_code.strip() + + def set_cte(self, cte_id: str, sql: str): + """This is the equivalent of what self.extra_ctes[cte_id] = sql would + do if extra_ctes were an OrderedDict + """ + for cte in self.extra_ctes: + if cte.id == cte_id: + cte.sql = sql + break + else: + self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) + + def __post_serialize__(self, dct): + dct = super().__post_serialize__(dct) + if "_pre_injected_sql" in dct: + del dct["_pre_injected_sql"] + # Remove compiled attributes + if "compiled" in dct and dct["compiled"] is False: + del dct["compiled"] + del dct["extra_ctes_injected"] + del dct["extra_ctes"] + # "omit_none" means these might not be in the dictionary + if "compiled_code" in dct: + del dct["compiled_code"] + return dct + + @property + def depends_on_nodes(self): + return self.depends_on.nodes + + @property + def depends_on_macros(self): + return self.depends_on.macros + + +# ==================================== +# CompiledNode subclasses +# ==================================== + + +@dataclass +class AnalysisNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) @dataclass -class ParsedHookNode(ParsedNode): +class HookNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) index: Optional[int] = None @dataclass -class ParsedModelNode(ParsedNode): +class ModelNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) # TODO: rm? 
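Returning to CompiledNode.set_cte above: its docstring promises dict-style upsert semantics over a list. A self-contained rendering of that behavior shows why the for/else shape works (update in place on an id match, append otherwise):

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class InjectedCTE:
        id: str
        sql: str

    @dataclass
    class ToyCompiledNode:
        extra_ctes: List[InjectedCTE] = field(default_factory=list)

        def set_cte(self, cte_id: str, sql: str) -> None:
            for cte in self.extra_ctes:
                if cte.id == cte_id:
                    cte.sql = sql  # update in place on an id match
                    break
            else:
                self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql))

    node = ToyCompiledNode()
    node.set_cte("model.a", "select 1")
    node.set_cte("model.a", "select 2")  # replaces, does not duplicate
    assert [c.sql for c in node.extra_ctes] == ["select 2"]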
@dataclass -class ParsedRPCNode(ParsedNode): +class RPCNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) @dataclass -class ParsedSqlNode(ParsedNode): +class SqlNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) -def same_seeds(first: ParsedNode, second: ParsedNode) -> bool: - # for seeds, we check the hashes. If the hashes are different types, - # no match. If the hashes are both the same 'path', log a warning and - # assume they are the same - # if the current checksum is a path, we want to log a warning. - result = first.checksum == second.checksum - - if first.checksum.name == "path": - msg: str - if second.checksum.name != "path": - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was " - f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed" - ) - elif result: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt " - f"cannot tell if it has changed: assuming they are the same" - ) - elif not result: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in " - f"a different location, assuming it has changed" - ) - else: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file had a " - f"checksum type of {second.checksum.name}, so it has changed" - ) - warn_or_error(msg, node=first) - - return result +# ==================================== +# Seed node +# ==================================== @dataclass -class ParsedSeedNode(ParsedNode): - # keep this in sync with CompiledSeedNode! +class SeedNode(ParsedNode): # No SQLDefaults! resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) config: SeedConfig = field(default_factory=SeedConfig) + # seeds need the root_path because the contents are not loaded initially + # and we need the root_path to load the seed later + root_path: Optional[str] = None + + def same_seeds(self, other: "SeedNode") -> bool: + # for seeds, we check the hashes. If the hashes are different types, + # no match. If the hashes are both the same 'path', log a warning and + # assume they are the same + # if the current checksum is a path, we want to log a warning. + result = self.checksum == other.checksum + + if self.checksum.name == "path": + msg: str + if other.checksum.name != "path": + warn_or_error( + SeedIncreased(package_name=self.package_name, name=self.name), node=self + ) + elif result: + warn_or_error( + SeedExceedsLimitSamePath(package_name=self.package_name, name=self.name), + node=self, + ) + elif not result: + warn_or_error( + SeedExceedsLimitAndPathChanged(package_name=self.package_name, name=self.name), + node=self, + ) + else: + warn_or_error( + SeedExceedsLimitChecksumChanged( + package_name=self.package_name, + name=self.name, + checksum_name=other.checksum.name, + ), + node=self, + ) + + return result @property def empty(self): """Seeds are never empty""" return False - def same_body(self: T, other: T) -> bool: - return same_seeds(self, other) + def same_body(self, other) -> bool: + return self.same_seeds(other) + @property + def depends_on_nodes(self): + return [] -@dataclass -class TestMetadata(dbtClassMixin, Replaceable): - name: str - # kwargs are the args that are left in the test builder after - # removing configs. 
They are set from the test builder when - # the test node is created. - kwargs: Dict[str, Any] = field(default_factory=dict) - namespace: Optional[str] = None + @property + def depends_on_macros(self): + return [] + @property + def extra_ctes(self): + return [] -@dataclass -class HasTestMetadata(dbtClassMixin): - test_metadata: TestMetadata + @property + def extra_ctes_injected(self): + return False + + @property + def language(self): + return "sql" + + +# ==================================== +# Singular Test node +# ==================================== + + +class TestShouldStoreFailures: + @property + def should_store_failures(self): + if self.config.store_failures: + return self.config.store_failures + return flags.STORE_FAILURES + + @property + def is_relational(self): + if self.should_store_failures: + return True + return False @dataclass -class ParsedSingularTestNode(ParsedNode): +class SingularTestNode(TestShouldStoreFailures, CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) # Was not able to make mypy happy and keep the code working. We need to # refactor the various configs. @@ -445,9 +574,30 @@ def test_node_type(self): return "singular" +# ==================================== +# Generic Test node +# ==================================== + + +@dataclass +class TestMetadata(dbtClassMixin, Replaceable): + name: str + # kwargs are the args that are left in the test builder after + # removing configs. They are set from the test builder when + # the test node is created. + kwargs: Dict[str, Any] = field(default_factory=dict) + namespace: Optional[str] = None + + +# This has to be separated out because it has no default and so +# has to be included as a superclass, not an attribute @dataclass -class ParsedGenericTestNode(ParsedNode, HasTestMetadata): - # keep this in sync with CompiledGenericTestNode! +class HasTestMetadata(dbtClassMixin): + test_metadata: TestMetadata + + +@dataclass +class GenericTestNode(TestShouldStoreFailures, CompiledNode, HasTestMetadata): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) column_name: Optional[str] = None file_key_name: Optional[str] = None @@ -466,54 +616,39 @@ def test_node_type(self): return "generic" +# ==================================== +# Snapshot node +# ==================================== + + @dataclass -class IntermediateSnapshotNode(ParsedNode): +class IntermediateSnapshotNode(CompiledNode): # at an intermediate stage in parsing, where we've built something better # than an unparsed node for rendering in parse mode, it's pretty possible # that we won't have critical snapshot-related information that is only # defined in config blocks. To fix that, we have an intermediate type that # uses a regular node config, which the snapshot parser will then convert - # into a full ParsedSnapshotNode after rendering. + # into a full ParsedSnapshotNode after rendering. Note: it currently does + # not work to set snapshot config in schema files because of the validation. 
resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: EmptySnapshotConfig = field(default_factory=EmptySnapshotConfig) @dataclass -class ParsedSnapshotNode(ParsedNode): +class SnapshotNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: SnapshotConfig -@dataclass -class ParsedPatch(HasYamlMetadata, Replaceable): - name: str - description: str - meta: Dict[str, Any] - docs: Docs - config: Dict[str, Any] +# ==================================== +# Macro +# ==================================== -# The parsed node update is only the 'patch', not the test. The test became a -# regular parsed node. Note that description and columns must be present, but -# may be empty. @dataclass -class ParsedNodePatch(ParsedPatch): - columns: Dict[str, ColumnInfo] - - -@dataclass -class ParsedMacroPatch(ParsedPatch): - arguments: List[MacroArgument] = field(default_factory=list) - - -@dataclass -class ParsedMacro(UnparsedBaseNode, HasUniqueID): - name: str +class Macro(BaseNode): macro_sql: str resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]}) - # TODO: can macros even have tags? - tags: List[str] = field(default_factory=list) - # TODO: is this ever populated? depends_on: MacroDependsOn = field(default_factory=MacroDependsOn) description: str = "" meta: Dict[str, Any] = field(default_factory=dict) @@ -523,7 +658,7 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID): created_at: float = field(default_factory=lambda: time.time()) supported_languages: Optional[List[ModelLanguage]] = None - def patch(self, patch: ParsedMacroPatch): + def patch(self, patch: "ParsedMacroPatch"): self.patch_path: Optional[str] = patch.file_id self.description = patch.description self.created_at = time.time() @@ -531,24 +666,33 @@ def patch(self, patch: ParsedMacroPatch): self.docs = patch.docs self.arguments = patch.arguments - def same_contents(self, other: Optional["ParsedMacro"]) -> bool: + def same_contents(self, other: Optional["Macro"]) -> bool: if other is None: return False # the only thing that makes one macro different from another with the # same name/package is its content return self.macro_sql == other.macro_sql + @property + def depends_on_macros(self): + return self.depends_on.macros + + +# ==================================== +# Documentation node +# ==================================== + @dataclass -class ParsedDocumentation(UnparsedDocumentation, HasUniqueID): - name: str +class Documentation(BaseNode): block_contents: str + resource_type: NodeType = field(metadata={"restrict": [NodeType.Documentation]}) @property def search_name(self): return self.name - def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool: + def same_contents(self, other: Optional["Documentation"]) -> bool: if other is None: return False # the only thing that makes one doc different from another with the @@ -556,6 +700,11 @@ def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool: return self.block_contents == other.block_contents +# ==================================== +# Source node +# ==================================== + + def normalize_test(testdef: TestDef) -> Dict[str, Any]: if isinstance(testdef, str): return {testdef: {}} @@ -564,11 +713,12 @@ def normalize_test(testdef: TestDef) -> Dict[str, Any]: @dataclass -class UnpatchedSourceDefinition(UnparsedBaseNode, HasUniqueID, HasFqn): +class UnpatchedSourceDefinition(BaseNode): source: UnparsedSourceDefinition table: UnparsedSourceTableDefinition + fqn: 
List[str] resource_type: NodeType = field(metadata={"restrict": [NodeType.Source]}) - patch_path: Optional[Path] = None + patch_path: Optional[str] = None def get_full_source_name(self): return f"{self.source.name}_{self.table.name}" @@ -576,10 +726,6 @@ def get_full_source_name(self): def get_source_representation(self): return f'source("{self.source.name}", "{self.table.name}")' - @property - def name(self) -> str: - return self.get_full_source_name() - @property def quote_columns(self) -> Optional[bool]: result = None @@ -611,13 +757,7 @@ def tests(self) -> List[TestDef]: @dataclass -class ParsedSourceMandatory( - UnparsedBaseNode, - HasUniqueID, - HasRelationMetadata, - HasFqn, -): - name: str +class ParsedSourceMandatory(GraphNode, HasRelationMetadata): source_name: str source_description: str loader: str @@ -626,7 +766,7 @@ class ParsedSourceMandatory( @dataclass -class ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory): +class SourceDefinition(NodeInfoMixin, ParsedSourceMandatory): quoting: Quoting = field(default_factory=Quoting) loaded_at_field: Optional[str] = None freshness: Optional[FreshnessThreshold] = None @@ -637,7 +777,7 @@ class ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory): source_meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) config: SourceConfig = field(default_factory=SourceConfig) - patch_path: Optional[Path] = None + patch_path: Optional[str] = None unrendered_config: Dict[str, Any] = field(default_factory=dict) relation_name: Optional[str] = None created_at: float = field(default_factory=lambda: time.time()) @@ -647,7 +787,7 @@ def __post_serialize__(self, dct): del dct["_event_status"] return dct - def same_database_representation(self, other: "ParsedSourceDefinition") -> bool: + def same_database_representation(self, other: "SourceDefinition") -> bool: return ( self.database == other.database and self.schema == other.schema @@ -655,26 +795,26 @@ def same_database_representation(self, other: "ParsedSourceDefinition") -> bool: and True ) - def same_quoting(self, other: "ParsedSourceDefinition") -> bool: + def same_quoting(self, other: "SourceDefinition") -> bool: return self.quoting == other.quoting - def same_freshness(self, other: "ParsedSourceDefinition") -> bool: + def same_freshness(self, other: "SourceDefinition") -> bool: return ( self.freshness == other.freshness and self.loaded_at_field == other.loaded_at_field and True ) - def same_external(self, other: "ParsedSourceDefinition") -> bool: + def same_external(self, other: "SourceDefinition") -> bool: return self.external == other.external - def same_config(self, old: "ParsedSourceDefinition") -> bool: + def same_config(self, old: "SourceDefinition") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedSourceDefinition"]) -> bool: + def same_contents(self, old: Optional["SourceDefinition"]) -> bool: # existing when it didn't before is a change! 
if old is None: return True @@ -740,12 +880,16 @@ def search_name(self): return f"{self.source_name}.{self.name}" +# ==================================== +# Exposure node +# ==================================== + + @dataclass -class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn): - name: str +class Exposure(GraphNode): type: ExposureType owner: ExposureOwner - resource_type: NodeType = NodeType.Exposure + resource_type: NodeType = field(metadata={"restrict": [NodeType.Exposure]}) description: str = "" label: Optional[str] = None maturity: Optional[MaturityType] = None @@ -757,6 +901,7 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn): depends_on: DependsOn = field(default_factory=DependsOn) refs: List[List[str]] = field(default_factory=list) sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) created_at: float = field(default_factory=lambda: time.time()) @property @@ -767,34 +912,34 @@ def depends_on_nodes(self): def search_name(self): return self.name - def same_depends_on(self, old: "ParsedExposure") -> bool: + def same_depends_on(self, old: "Exposure") -> bool: return set(self.depends_on.nodes) == set(old.depends_on.nodes) - def same_description(self, old: "ParsedExposure") -> bool: + def same_description(self, old: "Exposure") -> bool: return self.description == old.description - def same_label(self, old: "ParsedExposure") -> bool: + def same_label(self, old: "Exposure") -> bool: return self.label == old.label - def same_maturity(self, old: "ParsedExposure") -> bool: + def same_maturity(self, old: "Exposure") -> bool: return self.maturity == old.maturity - def same_owner(self, old: "ParsedExposure") -> bool: + def same_owner(self, old: "Exposure") -> bool: return self.owner == old.owner - def same_exposure_type(self, old: "ParsedExposure") -> bool: + def same_exposure_type(self, old: "Exposure") -> bool: return self.type == old.type - def same_url(self, old: "ParsedExposure") -> bool: + def same_url(self, old: "Exposure") -> bool: return self.url == old.url - def same_config(self, old: "ParsedExposure") -> bool: + def same_config(self, old: "Exposure") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedExposure"]) -> bool: + def same_contents(self, old: Optional["Exposure"]) -> bool: # existing when it didn't before is a change! 
# metadata/tags changes are not "changes" if old is None: @@ -814,6 +959,11 @@ def same_contents(self, old: Optional["ParsedExposure"]) -> bool: ) +# ==================================== +# Metric node +# ==================================== + + @dataclass class MetricReference(dbtClassMixin, Replaceable): sql: Optional[Union[str, int]] @@ -821,7 +971,7 @@ class MetricReference(dbtClassMixin, Replaceable): @dataclass -class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn): +class Metric(GraphNode): name: str description: str label: str @@ -831,10 +981,10 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn): filters: List[MetricFilter] time_grains: List[str] dimensions: List[str] + resource_type: NodeType = field(metadata={"restrict": [NodeType.Metric]}) window: Optional[MetricTime] = None model: Optional[str] = None model_unique_id: Optional[str] = None - resource_type: NodeType = NodeType.Metric meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) config: MetricConfig = field(default_factory=MetricConfig) @@ -853,43 +1003,43 @@ def depends_on_nodes(self): def search_name(self): return self.name - def same_model(self, old: "ParsedMetric") -> bool: + def same_model(self, old: "Metric") -> bool: return self.model == old.model - def same_window(self, old: "ParsedMetric") -> bool: + def same_window(self, old: "Metric") -> bool: return self.window == old.window - def same_dimensions(self, old: "ParsedMetric") -> bool: + def same_dimensions(self, old: "Metric") -> bool: return self.dimensions == old.dimensions - def same_filters(self, old: "ParsedMetric") -> bool: + def same_filters(self, old: "Metric") -> bool: return self.filters == old.filters - def same_description(self, old: "ParsedMetric") -> bool: + def same_description(self, old: "Metric") -> bool: return self.description == old.description - def same_label(self, old: "ParsedMetric") -> bool: + def same_label(self, old: "Metric") -> bool: return self.label == old.label - def same_calculation_method(self, old: "ParsedMetric") -> bool: + def same_calculation_method(self, old: "Metric") -> bool: return self.calculation_method == old.calculation_method - def same_expression(self, old: "ParsedMetric") -> bool: + def same_expression(self, old: "Metric") -> bool: return self.expression == old.expression - def same_timestamp(self, old: "ParsedMetric") -> bool: + def same_timestamp(self, old: "Metric") -> bool: return self.timestamp == old.timestamp - def same_time_grains(self, old: "ParsedMetric") -> bool: + def same_time_grains(self, old: "Metric") -> bool: return self.time_grains == old.time_grains - def same_config(self, old: "ParsedMetric") -> bool: + def same_config(self, old: "Metric") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedMetric"]) -> bool: + def same_contents(self, old: Optional["Metric"]) -> bool: # existing when it didn't before is a change! 
# metadata/tags changes are not "changes" if old is None: @@ -911,24 +1061,77 @@ def same_contents(self, old: Optional["ParsedMetric"]) -> bool: ) -ManifestNodes = Union[ - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, +# ==================================== +# Patches +# ==================================== + + +@dataclass +class ParsedPatch(HasYamlMetadata, Replaceable): + name: str + description: str + meta: Dict[str, Any] + docs: Docs + config: Dict[str, Any] + + +# The parsed node update is only the 'patch', not the test. The test became a +# regular parsed node. Note that description and columns must be present, but +# may be empty. +@dataclass +class ParsedNodePatch(ParsedPatch): + columns: Dict[str, ColumnInfo] + + +@dataclass +class ParsedMacroPatch(ParsedPatch): + arguments: List[MacroArgument] = field(default_factory=list) + + +# ==================================== +# Node unions/categories +# ==================================== + + +# ManifestNode without SeedNode, which doesn't have the +# SQL related attributes +ManifestSQLNode = Union[ + AnalysisNode, + SingularTestNode, + HookNode, + ModelNode, + RPCNode, + SqlNode, + GenericTestNode, + SnapshotNode, ] +# All SQL nodes plus SeedNode (csv files) +ManifestNode = Union[ + ManifestSQLNode, + SeedNode, +] + +ResultNode = Union[ + ManifestNode, + SourceDefinition, +] + +# All nodes that can be in the DAG +GraphMemberNode = Union[ + ResultNode, + Exposure, + Metric, +] + +# All "nodes" (or node-like objects) in this file +Resource = Union[ + GraphMemberNode, + Documentation, + Macro, +] -ParsedResource = Union[ - ParsedDocumentation, - ParsedMacro, - ParsedNode, - ParsedExposure, - ParsedMetric, - ParsedSourceDefinition, +TestNode = Union[ + SingularTestNode, + GenericTestNode, ] diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index 662ec6f01ad..453dc883d7b 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -24,7 +24,6 @@ @dataclass class UnparsedBaseNode(dbtClassMixin, Replaceable): package_name: str - root_path: str path: str original_file_path: str @@ -364,7 +363,6 @@ def get_table_named(self, name: str) -> Optional[SourceTablePatch]: @dataclass class UnparsedDocumentation(dbtClassMixin, Replaceable): package_name: str - root_path: str path: str original_file_path: str diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index b56aeddaf17..2fd7434bd87 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -12,9 +12,7 @@ from typing import Optional, List, Dict, Union, Any from mashumaro.types import SerializableType -PIN_PACKAGE_URL = ( - "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa -) + DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True @@ -57,6 +55,12 @@ class LocalPackage(Package): RawVersion = Union[str, float] +@dataclass +class TarballPackage(Package): + tarball: str + name: str + + @dataclass class GitPackage(Package): git: str @@ -84,7 +88,7 @@ def get_versions(self) -> List[str]: return [str(self.version)] -PackageSpec = Union[LocalPackage, GitPackage, RegistryPackage] +PackageSpec = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] @dataclass @@ -218,7 +222,7 @@ class Project(HyphenatedDbtClassMixin, Replaceable): ), ) packages: List[PackageSpec] = field(default_factory=list) - query_comment: 
Optional[Union[QueryComment, NoValue, str]] = NoValue() + query_comment: Optional[Union[QueryComment, NoValue, str]] = field(default_factory=NoValue) @classmethod def validate(cls, data): @@ -253,7 +257,6 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract): static_parser: Optional[bool] = None indirect_selection: Optional[str] = None cache_selected_only: Optional[bool] = None - event_buffer_size: Optional[int] = None @dataclass diff --git a/core/dbt/contracts/relation.py b/core/dbt/contracts/relation.py index fbe18146bb4..e8cba2ad155 100644 --- a/core/dbt/contracts/relation.py +++ b/core/dbt/contracts/relation.py @@ -9,7 +9,7 @@ from dbt.dataclass_schema import dbtClassMixin, StrEnum from dbt.contracts.util import Replaceable -from dbt.exceptions import raise_dataclass_not_dict, CompilationException +from dbt.exceptions import CompilationException, DataclassNotDict from dbt.utils import deep_merge @@ -43,10 +43,10 @@ def __getitem__(self, key): raise KeyError(key) from None def __iter__(self): - raise_dataclass_not_dict(self) + raise DataclassNotDict(self) def __len__(self): - raise_dataclass_not_dict(self) + raise DataclassNotDict(self) def incorporate(self, **kwargs): value = self.to_dict(omit_none=True) diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index a3b7ce2b506..97c43396e33 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -1,6 +1,5 @@ -from dbt.contracts.graph.manifest import CompileResultNode from dbt.contracts.graph.unparsed import FreshnessThreshold -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition, ResultNode from dbt.contracts.util import ( BaseArtifactMetadata, ArtifactMixin, @@ -11,11 +10,9 @@ from dbt.exceptions import InternalException from dbt.events.functions import fire_event from dbt.events.types import TimingInfoCollected -from dbt.events.proto_types import RunResultMsg -from dbt.logger import ( - TimingProcessor, - JsonOnly, -) +from dbt.events.proto_types import RunResultMsg, TimingInfoMsg +from dbt.events.contextvars import get_node_info +from dbt.logger import TimingProcessor from dbt.utils import lowercase, cast_to_str, cast_to_int from dbt.dataclass_schema import dbtClassMixin, StrEnum @@ -48,7 +45,14 @@ def begin(self): def end(self): self.completed_at = datetime.utcnow() + def to_msg(self): + timsg = TimingInfoMsg( + name=self.name, started_at=self.started_at, completed_at=self.completed_at + ) + return timsg + +# This is a context manager class collect_timing_info: def __init__(self, name: str): self.timing_info = TimingInfo(name=name) @@ -59,8 +63,13 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): self.timing_info.end() - with JsonOnly(), TimingProcessor(self.timing_info): - fire_event(TimingInfoCollected()) + # Note: when legacy logger is removed, we can remove the following line + with TimingProcessor(self.timing_info): + fire_event( + TimingInfoCollected( + timing_info=self.timing_info.to_msg(), node_info=get_node_info() + ) + ) class RunningStatus(StrEnum): @@ -128,13 +137,14 @@ def to_msg(self): msg.thread = self.thread_id msg.execution_time = self.execution_time msg.num_failures = cast_to_int(self.failures) - # timing_info, adapter_response, message + msg.timing_info = [ti.to_msg() for ti in self.timing] + # adapter_response return msg @dataclass class NodeResult(BaseResult): - node: CompileResultNode + node: ResultNode @dataclass @@ -220,7 +230,9 @@ def 
from_execution_results(
         generated_at: datetime,
         args: Dict,
     ):
-        processed_results = [process_run_result(result) for result in results]
+        processed_results = [
+            process_run_result(result) for result in results if isinstance(result, RunResult)
+        ]
         meta = RunResultsMetadata(
             dbt_schema_version=str(cls.dbt_schema_version),
             generated_at=generated_at,
@@ -271,7 +283,7 @@ def from_success(
 
 @dataclass
 class SourceFreshnessResult(NodeResult):
-    node: ParsedSourceDefinition
+    node: SourceDefinition
     status: FreshnessStatus
     max_loaded_at: datetime
     snapshotted_at: datetime
diff --git a/core/dbt/contracts/sql.py b/core/dbt/contracts/sql.py
index a3e5b3d58db..b80304d2565 100644
--- a/core/dbt/contracts/sql.py
+++ b/core/dbt/contracts/sql.py
@@ -5,7 +5,7 @@
 
 from dbt.dataclass_schema import dbtClassMixin
 
-from dbt.contracts.graph.compiled import CompileResultNode
+from dbt.contracts.graph.nodes import ResultNode
 from dbt.contracts.results import (
     RunResult,
     RunResultsArtifact,
@@ -32,7 +32,7 @@ class RemoteResult(VersionedSchema):
 class RemoteCompileResultMixin(RemoteResult):
     raw_code: str
     compiled_code: str
-    node: CompileResultNode
+    node: ResultNode
     timing: List[TimingInfo]
 
 
diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py
index f0975fda10b..99f7a35c66d 100644
--- a/core/dbt/contracts/util.py
+++ b/core/dbt/contracts/util.py
@@ -237,16 +237,61 @@ def rename_sql_attr(node_content: dict) -> dict:
     return node_content
 
 
+def upgrade_node_content(node_content):
+    rename_sql_attr(node_content)
+    if node_content["resource_type"] != "seed" and "root_path" in node_content:
+        del node_content["root_path"]
+
+
+def upgrade_seed_content(node_content):
+    # Remove compilation related attributes
+    for attr_name in (
+        "language",
+        "refs",
+        "sources",
+        "metrics",
+        "depends_on",
+        "compiled_path",
+        "compiled",
+        "compiled_code",
+        "extra_ctes_injected",
+        "extra_ctes",
+        "relation_name",
+    ):
+        if attr_name in node_content:
+            del node_content[attr_name]
+
+
 def upgrade_manifest_json(manifest: dict) -> dict:
     for node_content in manifest.get("nodes", {}).values():
-        node_content = rename_sql_attr(node_content)
+        upgrade_node_content(node_content)
+        if node_content["resource_type"] == "seed":
+            upgrade_seed_content(node_content)
     for disabled in manifest.get("disabled", {}).values():
         # There can be multiple disabled nodes for the same unique_id
         # so make sure all the nodes get the attr renamed
-        disabled = [rename_sql_attr(n) for n in disabled]
+        for node_content in disabled:
+            upgrade_node_content(node_content)
+            if node_content["resource_type"] == "seed":
+                upgrade_seed_content(node_content)
     for metric_content in manifest.get("metrics", {}).values():
         # handle attr renames + value translation ("expression" -> "derived")
         metric_content = rename_metric_attr(metric_content)
+        if "root_path" in metric_content:
+            del metric_content["root_path"]
+    for exposure_content in manifest.get("exposures", {}).values():
+        if "root_path" in exposure_content:
+            del exposure_content["root_path"]
+    for source_content in manifest.get("sources", {}).values():
+        if "root_path" in source_content:
+            del source_content["root_path"]
+    for macro_content in manifest.get("macros", {}).values():
+        if "root_path" in macro_content:
+            del macro_content["root_path"]
+    for doc_content in manifest.get("docs", {}).values():
+        if "root_path" in doc_content:
+            del doc_content["root_path"]
+        doc_content["resource_type"] = "doc"
     return manifest
 
 
@@ -291,7 +336,7 @@ def read_and_check_versions(cls, path: str):
expected=str(cls.dbt_schema_version), found=previous_schema_version, ) - if get_manifest_schema_version(data) <= 6: + if get_manifest_schema_version(data) <= 7: data = upgrade_manifest_json(data) return cls.from_dict(data) # type: ignore diff --git a/core/dbt/deprecations.py b/core/dbt/deprecations.py index 223091dea60..f7cee59df5a 100644 --- a/core/dbt/deprecations.py +++ b/core/dbt/deprecations.py @@ -1,14 +1,14 @@ +import abc from typing import Optional, Set, List, Dict, ClassVar import dbt.exceptions -from dbt import ui import dbt.tracking class DBTDeprecation: _name: ClassVar[Optional[str]] = None - _description: ClassVar[Optional[str]] = None + _event: ClassVar[Optional[str]] = None @property def name(self) -> str: @@ -21,66 +21,50 @@ def track_deprecation_warn(self) -> None: dbt.tracking.track_deprecation_warn({"deprecation_name": self.name}) @property - def description(self) -> str: - if self._description is not None: - return self._description - raise NotImplementedError("description not implemented for {}".format(self)) + def event(self) -> abc.ABCMeta: + if self._event is not None: + module_path = dbt.events.types + class_name = self._event + + try: + return getattr(module_path, class_name) + except AttributeError: + msg = f"Event Class `{class_name}` is not defined in `{module_path}`" + raise NameError(msg) + raise NotImplementedError("event not implemented for {}".format(self._event)) def show(self, *args, **kwargs) -> None: if self.name not in active_deprecations: - desc = self.description.format(**kwargs) - msg = ui.line_wrap_message(desc, prefix="Deprecated functionality\n\n") - dbt.exceptions.warn_or_error(msg, log_fmt=ui.warning_tag("{}")) + event = self.event(**kwargs) + dbt.events.functions.warn_or_error(event) self.track_deprecation_warn() active_deprecations.add(self.name) class PackageRedirectDeprecation(DBTDeprecation): _name = "package-redirect" - _description = """\ - The `{old_name}` package is deprecated in favor of `{new_name}`. Please update - your `packages.yml` configuration to use `{new_name}` instead. - """ + _event = "PackageRedirectDeprecation" class PackageInstallPathDeprecation(DBTDeprecation): _name = "install-packages-path" - _description = """\ - The default package install path has changed from `dbt_modules` to `dbt_packages`. - Please update `clean-targets` in `dbt_project.yml` and check `.gitignore` as well. - Or, set `packages-install-path: dbt_modules` if you'd like to keep the current value. - """ + _event = "PackageInstallPathDeprecation" -class ConfigPathDeprecation(DBTDeprecation): - _description = """\ - The `{deprecated_path}` config has been renamed to `{exp_path}`. - Please update your `dbt_project.yml` configuration to reflect this change. - """ - - -class ConfigSourcePathDeprecation(ConfigPathDeprecation): +class ConfigSourcePathDeprecation(DBTDeprecation): _name = "project-config-source-paths" + _event = "ConfigSourcePathDeprecation" -class ConfigDataPathDeprecation(ConfigPathDeprecation): +class ConfigDataPathDeprecation(DBTDeprecation): _name = "project-config-data-paths" - - -_adapter_renamed_description = """\ -The adapter function `adapter.{old_name}` is deprecated and will be removed in -a future release of dbt. Please use `adapter.{new_name}` instead. 
-
-Documentation for {new_name} can be found here:
-
-    https://docs.getdbt.com/docs/adapter
-"""
+    _event = "ConfigDataPathDeprecation"
 
 
 def renamed_method(old_name: str, new_name: str):
     class AdapterDeprecationWarning(DBTDeprecation):
         _name = "adapter:{}".format(old_name)
-        _description = _adapter_renamed_description.format(old_name=old_name, new_name=new_name)
+        _event = "AdapterDeprecationWarning"
 
     dep = AdapterDeprecationWarning()
     deprecations_list.append(dep)
@@ -89,26 +73,12 @@ class AdapterDeprecationWarning(DBTDeprecation):
 
 class MetricAttributesRenamed(DBTDeprecation):
     _name = "metric-attr-renamed"
-    _description = """\
-dbt-core v1.3 renamed attributes for metrics:
-\n  'sql'  ->  'expression'
-\n  'type'  ->  'calculation_method'
-\n  'type: expression'  ->  'calculation_method: derived'
-\nThe old metric parameter names will be fully deprecated in v1.4.
-\nPlease remove them from the metric definition of metric '{metric_name}'
-\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849
-"""
+    _event = "MetricAttributesRenamed"
 
 
 class ExposureNameDeprecation(DBTDeprecation):
     _name = "exposure-name"
-    _description = """\
-    Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores.
-    Exposures support a new property, 'label', which may contain spaces, capital letters, and special characters.
-    {exposure} does not follow this pattern.
-    Please update the 'name', and use the 'label' property for a human-friendly title.
-    This will raise an error in a future version of dbt-core.
-    """
+    _event = "ExposureNameDeprecation"
 
 
 def warn(name, *args, **kwargs):
@@ -125,12 +95,12 @@ def warn(name, *args, **kwargs):
 active_deprecations: Set[str] = set()
 
 deprecations_list: List[DBTDeprecation] = [
-    ExposureNameDeprecation(),
+    PackageRedirectDeprecation(),
+    PackageInstallPathDeprecation(),
     ConfigSourcePathDeprecation(),
     ConfigDataPathDeprecation(),
-    PackageInstallPathDeprecation(),
-    PackageRedirectDeprecation(),
     MetricAttributesRenamed(),
+    ExposureNameDeprecation(),
 ]
 
 deprecations: Dict[str, DBTDeprecation] = {d.name: d for d in deprecations_list}
diff --git a/core/dbt/deps/README.md b/core/dbt/deps/README.md
index a00802cefbf..99c7fd6fb80 100644
--- a/core/dbt/deps/README.md
+++ b/core/dbt/deps/README.md
@@ -16,6 +16,8 @@ Defines the base classes of `PinnedPackage` and `UnpinnedPackage`.
 
 `downloads_directory` sets the directory packages will be downloaded to.
 
+`_install` has retry logic if the download or untarring process hit exceptions (see `dbt.utils._connection_exception_retry`).
+
 ## `git.py`
 
 Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined with git urls.
@@ -28,8 +30,10 @@ Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined l
 
 Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined on the dbt Hub registry.
 
-`install` has retry logic if the download or untarring process hit exceptions (see `dbt.utils._connection_exception_retry`).
-
 ## `resolver.py`
 
 Resolves the package definition into package objects to download.
+
+## `tarball.py`
+Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined by a URL to a tarball hosted on an HTTP server.
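To make the `_install` retry note above concrete, here is a minimal sketch (not part of the patch) of the wiring that `PinnedPackage._install` sets up. The URL, paths, and package name are made up; `system.download`, `system.untar_package`, and `_connection_exception_retry` are the helpers referenced in this diff.

```python
from dbt.clients import system
from dbt.utils import _connection_exception_retry as connection_exception_retry

# Illustrative values; in PinnedPackage._install() these come from the
# package metadata and the project configuration.
download_url = "https://example.com/dbt_utils-1.0.0.tar.gz"
tar_path = "/tmp/dbt-downloads/dbt_utils.1.0.0.tar.gz"
deps_path = "dbt_packages"
package_name = "dbt_utils"


def download_untar_fn():
    # The two steps are retried as a unit: a download that "succeeds" but
    # leaves a corrupt archive fails at untar time, which triggers a fresh
    # download attempt instead of a hard error.
    system.download(download_url, tar_path)
    system.untar_package(tar_path, deps_path, package_name)


connection_exception_retry(download_untar_fn, 5)
```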
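Likewise, a hedged sketch of how a tarball dependency flows through the new contract and deps classes (the URL and package name are hypothetical; the classes are the ones added to `core/dbt/contracts/project.py` and `core/dbt/deps/tarball.py` in this diff):

```python
from dbt.contracts.project import TarballPackage
from dbt.deps.tarball import TarballUnpinnedPackage

# A packages.yml entry carrying `tarball:` and `name:` keys deserializes into
# the TarballPackage contract.
contract = TarballPackage(
    tarball="https://example.com/dbt_utils-1.0.0.tar.gz",
    name="dbt_utils",
)

# The resolver builds an unpinned package from the contract and then "pins"
# it; tarballs have no real version resolution, so the version is always the
# sentinel string "tarball".
unpinned = TarballUnpinnedPackage.from_contract(contract)
pinned = unpinned.resolved()
print(pinned.nice_version_name())  # tarball (url: https://example.com/dbt_utils-1.0.0.tar.gz)
```

Because `TarballPinnedPackage._fetch_metadata` fabricates a `RegistryPackageMetadata` whose download target is the tarball URL, `install()` reuses the shared `_install` download/untar/retry path unchanged.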
diff --git a/core/dbt/deps/base.py b/core/dbt/deps/base.py index 1557b0d7a35..f72878422aa 100644 --- a/core/dbt/deps/base.py +++ b/core/dbt/deps/base.py @@ -1,13 +1,16 @@ import abc import os +import functools import tempfile from contextlib import contextmanager +from pathlib import Path from typing import List, Optional, Generic, TypeVar from dbt.clients import system from dbt.contracts.project import ProjectPackageMetadata from dbt.events.functions import fire_event from dbt.events.types import DepsSetDownloadDirectory +from dbt.utils import _connection_exception_retry as connection_exception_retry DOWNLOADS_PATH = None @@ -74,7 +77,7 @@ def _fetch_metadata(self, project, renderer): raise NotImplementedError @abc.abstractmethod - def install(self, project): + def install(self, project, renderer): raise NotImplementedError @abc.abstractmethod @@ -97,6 +100,34 @@ def get_installation_path(self, project, renderer): def get_subdirectory(self): return None + def _install(self, project, renderer): + metadata = self.fetch_metadata(project, renderer) + + tar_name = f"{self.package}.{self.version}.tar.gz" + tar_path = (Path(get_downloads_path()) / tar_name).resolve(strict=False) + system.make_directory(str(tar_path.parent)) + + download_url = metadata.downloads.tarball + deps_path = project.packages_install_path + package_name = self.get_project_name(project, renderer) + + download_untar_fn = functools.partial( + self.download_and_untar, download_url, str(tar_path), deps_path, package_name + ) + connection_exception_retry(download_untar_fn, 5) + + def download_and_untar(self, download_url, tar_path, deps_path, package_name): + """ + Sometimes the download of the files fails and we want to retry. Sometimes the + download appears successful but the file did not make it through as expected + (generally due to a github incident). Either way we want to retry downloading + and untarring to see if we can get a success. Call this within + `_connection_exception_retry` + """ + + system.download(download_url, tar_path) + system.untar_package(tar_path, deps_path, package_name) + SomePinned = TypeVar("SomePinned", bound=PinnedPackage) SomeUnpinned = TypeVar("SomeUnpinned", bound="UnpinnedPackage") diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py index 2b08e04632f..683ce2c4dc7 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -10,14 +10,9 @@ GitPackage, ) from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path -from dbt.exceptions import ExecutableError, warn_or_error, raise_dependency_error -from dbt.events.functions import fire_event -from dbt.events.types import EnsureGitInstalled -from dbt import ui - -PIN_PACKAGE_URL = ( - "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa -) +from dbt.exceptions import ExecutableError, MultipleVersionGitDeps +from dbt.events.functions import fire_event, warn_or_error +from dbt.events.types import EnsureGitInstalled, DepsUnpinned def md5sum(s: str): @@ -63,14 +58,6 @@ def nice_version_name(self): else: return "revision {}".format(self.revision) - def unpinned_msg(self): - if self.revision == "HEAD": - return "not pinned, using HEAD (default branch)" - elif self.revision in ("main", "master"): - return f'pinned to the "{self.revision}" branch' - else: - return None - def _checkout(self): """Performs a shallow clone of the repository into the downloads directory. This function can be called repeatedly. 
If the project has @@ -95,14 +82,8 @@ def _fetch_metadata( ) -> ProjectPackageMetadata: path = self._checkout() - if self.unpinned_msg() and self.warn_unpinned: - warn_or_error( - 'The git package "{}" \n\tis {}.\n\tThis can introduce ' - "breaking changes into your project without warning!\n\nSee {}".format( - self.git, self.unpinned_msg(), PIN_PACKAGE_URL - ), - log_fmt=ui.yellow("WARNING: {}"), - ) + if (self.revision == "HEAD" or self.revision in ("main", "master")) and self.warn_unpinned: + warn_or_error(DepsUnpinned(git=self.git)) partial = PartialProject.from_project_root(path) return partial.render_package_metadata(renderer) @@ -165,10 +146,7 @@ def resolved(self) -> GitPinnedPackage: if len(requested) == 0: requested = {"HEAD"} elif len(requested) > 1: - raise_dependency_error( - "git dependencies should contain exactly one version. " - "{} contains: {}".format(self.git, requested) - ) + raise MultipleVersionGitDeps(self.git, requested) return GitPinnedPackage( git=self.git, diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py index bd8263e4001..f3398f4b16f 100644 --- a/core/dbt/deps/registry.py +++ b/core/dbt/deps/registry.py @@ -1,23 +1,20 @@ -import os -import functools from typing import List from dbt import semver from dbt import flags from dbt.version import get_installed_version -from dbt.clients import registry, system +from dbt.clients import registry from dbt.contracts.project import ( RegistryPackageMetadata, RegistryPackage, ) -from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path +from dbt.deps.base import PinnedPackage, UnpinnedPackage from dbt.exceptions import ( - package_version_not_found, - VersionsNotCompatibleException, DependencyException, - package_not_found, + PackageNotFound, + PackageVersionNotFound, + VersionsNotCompatibleException, ) -from dbt.utils import _connection_exception_retry as connection_exception_retry class RegistryPackageMixin: @@ -60,32 +57,7 @@ def _fetch_metadata(self, project, renderer) -> RegistryPackageMetadata: return RegistryPackageMetadata.from_dict(dct) def install(self, project, renderer): - metadata = self.fetch_metadata(project, renderer) - - tar_name = "{}.{}.tar.gz".format(self.package, self.version) - tar_path = os.path.realpath(os.path.join(get_downloads_path(), tar_name)) - system.make_directory(os.path.dirname(tar_path)) - - download_url = metadata.downloads.tarball - deps_path = project.packages_install_path - package_name = self.get_project_name(project, renderer) - - download_untar_fn = functools.partial( - self.download_and_untar, download_url, tar_path, deps_path, package_name - ) - connection_exception_retry(download_untar_fn, 5) - - def download_and_untar(self, download_url, tar_path, deps_path, package_name): - """ - Sometimes the download of the files fails and we want to retry. Sometimes the - download appears successful but the file did not make it through as expected - (generally due to a github incident). Either way we want to retry downloading - and untarring to see if we can get a success. 
Call this within - `_connection_exception_retry` - """ - - system.download(download_url, tar_path) - system.untar_package(tar_path, deps_path, package_name) + self._install(project, renderer) class RegistryUnpinnedPackage(RegistryPackageMixin, UnpinnedPackage[RegistryPinnedPackage]): @@ -99,7 +71,7 @@ def __init__( def _check_in_index(self): index = registry.index_cached() if self.package not in index: - package_not_found(self.package) + raise PackageNotFound(self.package) @classmethod def from_contract(cls, contract: RegistryPackage) -> "RegistryUnpinnedPackage": @@ -146,7 +118,7 @@ def resolved(self) -> RegistryPinnedPackage: target = None if not target: # raise an exception if no installable target version is found - package_version_not_found(self.package, range_, installable, should_version_check) + raise PackageVersionNotFound(self.package, range_, installable, should_version_check) latest_compatible = installable[-1] return RegistryPinnedPackage( package=self.package, version=target, version_latest=latest_compatible diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 7313280a3ca..b83a3bdee7d 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -1,22 +1,29 @@ from dataclasses import dataclass, field from typing import Dict, List, NoReturn, Union, Type, Iterator, Set, Any -from dbt.exceptions import raise_dependency_error, InternalException +from dbt.exceptions import ( + DuplicateDependencyToRoot, + DuplicateProjectDependency, + MismatchedDependencyTypes, + InternalException, +) from dbt.config import Project from dbt.config.renderer import PackageRenderer from dbt.deps.base import BasePackage, PinnedPackage, UnpinnedPackage from dbt.deps.local import LocalUnpinnedPackage +from dbt.deps.tarball import TarballUnpinnedPackage from dbt.deps.git import GitUnpinnedPackage from dbt.deps.registry import RegistryUnpinnedPackage from dbt.contracts.project import ( LocalPackage, + TarballPackage, GitPackage, RegistryPackage, ) -PackageContract = Union[LocalPackage, GitPackage, RegistryPackage] +PackageContract = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] @dataclass @@ -49,10 +56,7 @@ def __setitem__(self, key: BasePackage, value): self.packages[key_str] = value def _mismatched_types(self, old: UnpinnedPackage, new: UnpinnedPackage) -> NoReturn: - raise_dependency_error( - f"Cannot incorporate {new} ({new.__class__.__name__}) in {old} " - f"({old.__class__.__name__}): mismatched types" - ) + raise MismatchedDependencyTypes(new, old) def incorporate(self, package: UnpinnedPackage): key: str = self._pick_key(package) @@ -69,6 +73,8 @@ def update_from(self, src: List[PackageContract]) -> None: for contract in src: if isinstance(contract, LocalPackage): pkg = LocalUnpinnedPackage.from_contract(contract) + elif isinstance(contract, TarballPackage): + pkg = TarballUnpinnedPackage.from_contract(contract) elif isinstance(contract, GitPackage): pkg = GitUnpinnedPackage.from_contract(contract) elif isinstance(contract, RegistryPackage): @@ -101,17 +107,9 @@ def _check_for_duplicate_project_names( for package in final_deps: project_name = package.get_project_name(project, renderer) if project_name in seen: - raise_dependency_error( - f'Found duplicate project "{project_name}". This occurs when ' - "a dependency has the same project name as some other " - "dependency." 
-            )
+            raise DuplicateProjectDependency(project_name)
         elif project_name == project.project_name:
-            raise_dependency_error(
-                "Found a dependency with the same name as the root project "
-                f'"{project_name}". Package names must be unique in a project.'
-                " Please rename one of these packages."
-            )
+            raise DuplicateDependencyToRoot(project_name)
         seen.add(project_name)
 
 
diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py
new file mode 100644
index 00000000000..16c9cb0a20d
--- /dev/null
+++ b/core/dbt/deps/tarball.py
@@ -0,0 +1,74 @@
+from dbt.contracts.project import RegistryPackageMetadata, TarballPackage
+from dbt.deps.base import PinnedPackage, UnpinnedPackage
+
+
+class TarballPackageMixin:
+    def __init__(self, tarball: str) -> None:
+        super().__init__()
+        self.tarball = tarball
+
+    @property
+    def name(self):
+        return self.tarball
+
+    def source_type(self) -> str:
+        return "tarball"
+
+
+class TarballPinnedPackage(TarballPackageMixin, PinnedPackage):
+    def __init__(self, tarball: str, package: str) -> None:
+        super().__init__(tarball)
+        # setup to recycle RegistryPinnedPackage fns
+        self.package = package
+        self.version = "tarball"
+
+    @property
+    def name(self):
+        return self.package
+
+    def get_version(self):
+        return self.version
+
+    def nice_version_name(self):
+        return f"tarball (url: {self.tarball})"
+
+    def _fetch_metadata(self, project, renderer):
+        """
+        recycle RegistryPackageMetadata so that we can use the install and
+        download_and_untar from RegistryPinnedPackage next.
+        build RegistryPackageMetadata from info passed via packages.yml since no
+        'metadata' service exists in this case.
+        """
+
+        dct = {
+            "name": self.package,
+            "packages": [],  # note: required by RegistryPackageMetadata
+            "downloads": {"tarball": self.tarball},
+        }
+
+        return RegistryPackageMetadata.from_dict(dct)
+
+    def install(self, project, renderer):
+        self._install(project, renderer)
+
+
+class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):
+    def __init__(
+        self,
+        tarball: str,
+        package: str,
+    ) -> None:
+        super().__init__(tarball)
+        # setup to recycle RegistryPinnedPackage fns
+        self.package = package
+        self.version = "tarball"
+
+    @classmethod
+    def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage":
+        return cls(tarball=contract.tarball, package=contract.name)
+
+    def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage":
+        return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)
+
+    def resolved(self) -> TarballPinnedPackage:
+        return TarballPinnedPackage(tarball=self.tarball, package=self.package)
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..8aaad5e25b0b97cc741c122d6608193f2544081f
GIT binary patch
[literal 65160; base85 binary payload omitted]

diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree
new file mode 100644
index 0000000000000000000000000000000000000000..3acd417b911278b24a5d2810fb56f09df6f5612c
GIT binary patch
[literal 87794; base85 binary payload omitted]

diff --git a/core/dbt/docs/build/html/.buildinfo b/core/dbt/docs/build/html/.buildinfo
new file mode 100644
index 00000000000..39803f13c3e
--- /dev/null
+++ b/core/dbt/docs/build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 1ee31fc16e025fb98598189ba2cb5fcb
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/core/dbt/docs/build/html/_sources/index.rst.txt b/core/dbt/docs/build/html/_sources/index.rst.txt
new file mode 100644
index 00000000000..d5e3c6007af
--- /dev/null
+++ b/core/dbt/docs/build/html/_sources/index.rst.txt
@@ -0,0 +1,4 @@
+dbt-core's API documentation
+============================
+
+.. dbt_click:: dbt.cli.main:cli
diff --git a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
new file mode 100644
index 00000000000..8549469dc29
--- /dev/null
+++ b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
@@ -0,0 +1,134 @@
[vendored jQuery/underscore compatibility shim for Sphinx omitted]
diff --git a/core/dbt/docs/build/html/_static/alabaster.css b/core/dbt/docs/build/html/_static/alabaster.css
new file mode 100644
index 00000000000..0eddaeb07d1
--- /dev/null
+++ b/core/dbt/docs/build/html/_static/alabaster.css
@@ -0,0 +1,701 @@
[vendored Alabaster theme stylesheet omitted]
diff --git a/core/dbt/docs/build/html/_static/basic.css b/core/dbt/docs/build/html/_static/basic.css
new file mode 100644
index 00000000000..4e9a9f1faca
--- /dev/null
+++ b/core/dbt/docs/build/html/_static/basic.css
@@ -0,0 +1,900 @@
[vendored Sphinx basic-theme stylesheet omitted]
{ + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, 
.sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable 
td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/custom.css b/core/dbt/docs/build/html/_static/custom.css new file mode 100644 index 00000000000..2a924f1d6a8 --- /dev/null +++ b/core/dbt/docs/build/html/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/core/dbt/docs/build/html/_static/doctools.js b/core/dbt/docs/build/html/_static/doctools.js new file mode 100644 index 00000000000..527b876ca63 --- /dev/null +++ b/core/dbt/docs/build/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/core/dbt/docs/build/html/_static/documentation_options.js b/core/dbt/docs/build/html/_static/documentation_options.js new file mode 100644 index 00000000000..b57ae3b8393 --- /dev/null +++ 
b/core/dbt/docs/build/html/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/file.png b/core/dbt/docs/build/html/_static/file.png new file mode 100644 index 0000000000000000000000000000000000000000..a858a410e4faa62ce324d814e4b816fff83a6fb3 GIT binary patch literal 286 zcmV+(0pb3MP)s`hMrGg#P~ix$^RISR_I47Y|r1 z_CyJOe}D1){SET-^Amu_i71Lt6eYfZjRyw@I6OQAIXXHDfiX^GbOlHe=Ae4>0m)d(f|Me07*qoM6N<$f}vM^LjV8( literal 0 HcmV?d00001 diff --git a/core/dbt/docs/build/html/_static/jquery-3.6.0.js b/core/dbt/docs/build/html/_static/jquery-3.6.0.js new file mode 100644 index 00000000000..fc6c299b73e --- /dev/null +++ b/core/dbt/docs/build/html/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. 
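+	// (In those environments an element still has a numeric nodeType, which is
+	// what the guard below keys on; a plain callback has no nodeType at all.)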
+ // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
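+	// The callback receives ( index, element ) and is invoked with `this` bound
+	// to the element; returning false stops the iteration (see jQuery.each below).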
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && 
proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? 
+ String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? 
context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
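+	// The probe element is a <fieldset> wrapping a <div>: when :scope correctly
+	// refers to the query root, ":scope fieldset div" matches nothing; when it
+	// behaves like :root, the inner <div> matches and the test reports no support.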
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue. 
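+			// Probe: append an <input name=""> and query for it; if qSA cannot see
+			// it, route empty-string [name=...] selectors to the non-native fallback.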
+			input = document.createElement( "input" );
+			input.setAttribute( "name", "" );
+			el.appendChild( input );
+			if ( !el.querySelectorAll( "[name='']" ).length ) {
+				rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
+					whitespace + "*(?:''|\"\")" );
+			}
+
+			// Webkit/Opera - :checked should return selected option elements
+			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+			// IE8 throws error here and will not see later tests
+			if ( !el.querySelectorAll( ":checked" ).length ) {
+				rbuggyQSA.push( ":checked" );
+			}
+
+			// Support: Safari 8+, iOS 8+
+			// https://bugs.webkit.org/show_bug.cgi?id=136851
+			// In-page `selector#id sibling-combinator selector` fails
+			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+				rbuggyQSA.push( ".#.+[+~]" );
+			}
+
+			// Support: Firefox <=3.6 - 5 only
+			// Old Firefox doesn't throw on a badly-escaped identifier.
+			el.querySelectorAll( "\\\f" );
+			rbuggyQSA.push( "[\\r\\n\\f]" );
+		} );
+
+		assert( function( el ) {
+			el.innerHTML = "<a href='' disabled='disabled'></a>" +
+				"<select disabled='disabled'><option/></select>";
+
+			// Support: Windows 8 Native Apps
+			// The type and name attributes are restricted during .innerHTML assignment
+			var input = document.createElement( "input" );
+			input.setAttribute( "type", "hidden" );
+			el.appendChild( input ).setAttribute( "name", "D" );
+
+			// Support: IE8
+			// Enforce case-sensitivity of name attribute
+			if ( el.querySelectorAll( "[name=d]" ).length ) {
+				rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
+			}
+
+			// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
+			// IE8 throws error here and will not see later tests
+			if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
+				rbuggyQSA.push( ":enabled", ":disabled" );
+			}
+
+			// Support: IE9-11+
+			// IE's :disabled selector does not pick up the children of disabled fieldsets
+			docElem.appendChild( el ).disabled = true;
+			if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
+				rbuggyQSA.push( ":enabled", ":disabled" );
+			}
+
+			// Support: Opera 10 - 11 only
+			// Opera 10-11 does not throw on post-comma invalid pseudos
+			el.querySelectorAll( "*,:x" );
+			rbuggyQSA.push( ",.*:" );
+		} );
+	}
+
+	if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
+		docElem.webkitMatchesSelector ||
+		docElem.mozMatchesSelector ||
+		docElem.oMatchesSelector ||
+		docElem.msMatchesSelector ) ) ) ) {
+
+		assert( function( el ) {
+
+			// Check to see if it's possible to do matchesSelector
+			// on a disconnected node (IE 9)
+			support.disconnectedMatch = matches.call( el, "*" );
+
+			// This should fail with an exception
+			// Gecko does not error, returns false instead
+			matches.call( el, "[s!='']:x" );
+			rbuggyMatches.push( "!=", pseudos );
+		} );
+	}
+
+	rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
+	rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
+
+	/* Contains
+	---------------------------------------------------------------------- */
+	hasCompare = rnative.test( docElem.compareDocumentPosition );
+
+	// Element contains another
+	// Purposefully self-exclusive
+	// As in, an element does not contain itself
+	contains = hasCompare || rnative.test( docElem.contains ) ?
+		function( a, b ) {
+			var adown = a.nodeType === 9 ? a.documentElement : a,
+				bup = b && b.parentNode;
+			return a === bup || !!( bup && bup.nodeType === 1 && (
+				adown.contains ?
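// A small reference sketch for the bitwise test just below: per the DOM spec,
// compareDocumentPosition returns a bitmask, and bit 16
// (DOCUMENT_POSITION_CONTAINED_BY) is set when the argument sits inside the
// receiver, e.g.:
//
//     document.documentElement.compareDocumentPosition( document.body ) & 16; // 16 (truthy)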
+ adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? + + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
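// For reference, a sketch of the DOM-spec bit values the
// compareDocumentPosition-based comparator above relies on: 1 = disconnected,
// 2 = preceding, 4 = following. Its final `compare & 4 ? -1 : 1` therefore
// sorts `a` first exactly when `b` follows it in document order:
//
//     var p = document.createElement( "p" ),
//         q = document.createElement( "q" );
//     document.body.appendChild( p );
//     document.body.appendChild( q );
//     p.compareDocumentPosition( q ) & 4; // 4: q follows p, so p sorts first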
+ /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
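// A worked sketch of the an+b normalization completed below (values follow
// from the capture map documented above):
//
//     ":nth-child(2n+1)" -> match[ 4 ] = +( "" + "2" ) = 2,  match[ 5 ] = +"+1" = 1
//     ":nth-child(even)" -> match[ 4 ] = 2 * true = 2,       match[ 5 ] = 0
//     ":nth-child(odd)"  -> match[ 4 ] = 2 * true = 2,       match[ 5 ] = +true = 1
//
// The CHILD filter later receives these as its `first`/`last` arguments.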
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? 
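// Sketch of the simple (non-nth) branch entered above: for ":first-child",
// `dir` is "previousSibling", so this walk fails as soon as any preceding
// element sibling exists; ":only-child" then re-runs the walk forward via the
// `start = dir = ...` reversal below.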
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
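// For context, the createPseudo path above (markFunction) is the supported way
// to add an argument-taking pseudo; a sketch with an invented ":icontains" name:
//
//     Expr.pseudos[ "icontains" ] = markFunction( function( text ) {
//         return function( elem ) {
//             return ( elem.textContent || getText( elem ) )
//                 .toLowerCase().indexOf( text.toLowerCase() ) > -1;
//         };
//     } );
//
// after which Sizzle( "li:icontains(foo)", document ) filters as expected.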
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
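// A sketch of the clamping finished below: ":lt(n)" collects indexes 0..n-1,
// counting a negative n from the end; over five elements:
//
//     ":lt(2)"  -> pushes 1, 0          (i counts down from the bound)
//     ":lt(-1)" -> pushes 3, 2, 1, 0    (-1 + 5 = 4, exclusive)
//     ":lt(9)"  -> pushes 4..0          (argument clamped to length)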
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
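// Sketch of how the two branches below are selected: for "div > p" the ">"
// descriptor in Expr.relative is { dir: "parentNode", first: true }, so the
// matcher hops exactly one parentNode and tests "div" there; for "div p"
// ( { dir: "parentNode" }, no `first` ) the second branch keeps walking
// ancestors, memoizing per-element results under `expando` so repeated
// subtree scans stay roughly linear.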
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
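// Sketch of the single-group fast path above: for Sizzle( "#main .item" ) the
// leading ID token reduces `context` to the #main element and the selector
// shrinks to " .item"; the loop below then tries to fetch a seed
// right-to-left (here via Expr.find[ "CLASS" ]) before compiling anything.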
0 : tokens.length;
+		while ( i-- ) {
+			token = tokens[ i ];
+
+			// Abort if we hit a combinator
+			if ( Expr.relative[ ( type = token.type ) ] ) {
+				break;
+			}
+			if ( ( find = Expr.find[ type ] ) ) {
+
+				// Search, expanding context for leading sibling combinators
+				if ( ( seed = find(
+					token.matches[ 0 ].replace( runescape, funescape ),
+					rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
+						context
+				) ) ) {
+
+					// If seed is empty or no tokens remain, we can return early
+					tokens.splice( i, 1 );
+					selector = seed.length && toSelector( tokens );
+					if ( !selector ) {
+						push.apply( results, seed );
+						return results;
+					}
+
+					break;
+				}
+			}
+		}
+	}
+
+	// Compile and execute a filtering function if one is not provided
+	// Provide `match` to avoid retokenization if we modified the selector above
+	( compiled || compile( selector, match ) )(
+		seed,
+		context,
+		!documentIsHTML,
+		results,
+		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+	);
+	return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert( function( el ) {
+
+	// Should return 1, but returns 4 (following)
+	return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
+} );
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert( function( el ) {
+	el.innerHTML = "<a href='#'></a>";
+	return el.firstChild.getAttribute( "href" ) === "#";
+} ) ) {
+	addHandle( "type|href|height|width", function( elem, name, isXML ) {
+		if ( !isXML ) {
+			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+		}
+	} );
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert( function( el ) {
+	el.innerHTML = "<input/>";
+	el.firstChild.setAttribute( "value", "" );
+	return el.firstChild.getAttribute( "value" ) === "";
+} ) ) {
+	addHandle( "value", function( elem, _name, isXML ) {
+		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+			return elem.defaultValue;
+		}
+	} );
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert( function( el ) {
+	return el.getAttribute( "disabled" ) == null;
+} ) ) {
+	addHandle( booleans, function( elem, name, isXML ) {
+		var val;
+		if ( !isXML ) {
+			return elem[ name ] === true ? name.toLowerCase() :
+				( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
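// A sketch of the positional branch above: with two paragraphs in a page,
// jQuery( "p:first" ).is( "p:last" ) builds jQuery( "p:last" ) once and tests
// set membership, so it correctly yields false rather than re-running the
// positional selector against each candidate element.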
+			jQuery( selector ) :
+			selector || [],
+			false
+		).length;
+	}
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+	// A simple way to check for HTML strings
+	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+	// Strict HTML recognition (#11290: must start with <)
+	// Shortcut simple #id case for speed
+	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+	init = jQuery.fn.init = function( selector, context, root ) {
+		var match, elem;
+
+		// HANDLE: $(""), $(null), $(undefined), $(false)
+		if ( !selector ) {
+			return this;
+		}
+
+		// Method init() accepts an alternate rootjQuery
+		// so migrate can support jQuery.sub (gh-2101)
+		root = root || rootjQuery;
+
+		// Handle HTML strings
+		if ( typeof selector === "string" ) {
+			if ( selector[ 0 ] === "<" &&
+				selector[ selector.length - 1 ] === ">" &&
+				selector.length >= 3 ) {
+
+				// Assume that strings that start and end with <> are HTML and skip the regex check
+				match = [ null, selector, null ];
+
+			} else {
+				match = rquickExpr.exec( selector );
+			}
+
+			// Match html or make sure no context is specified for #id
+			if ( match && ( match[ 1 ] || !context ) ) {
+
+				// HANDLE: $(html) -> $(array)
+				if ( match[ 1 ] ) {
+					context = context instanceof jQuery ? context[ 0 ] : context;
+
+					// Option to run scripts is true for back-compat
+					// Intentionally let the error be thrown if parseHTML is not present
+					jQuery.merge( this, jQuery.parseHTML(
+						match[ 1 ],
+						context && context.nodeType ? context.ownerDocument || context : document,
+						true
+					) );
+
+					// HANDLE: $(html, props)
+					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+						for ( match in context ) {
+
+							// Properties of context are called as methods if possible
+							if ( isFunction( this[ match ] ) ) {
+								this[ match ]( context[ match ] );
+
+							// ...and otherwise set as attributes
+							} else {
+								this.attr( match, context[ match ] );
+							}
+						}
+					}
+
+					return this;
+
+				// HANDLE: $(#id)
+				} else {
+					elem = document.getElementById( match[ 2 ] );
+
+					if ( elem ) {
+
+						// Inject the element directly into the jQuery object
+						this[ 0 ] = elem;
+						this.length = 1;
+					}
+					return this;
+				}
+
+			// HANDLE: $(expr, $(...))
+			} else if ( !context || context.jquery ) {
+				return ( context || root ).find( selector );
+
+			// HANDLE: $(expr, context)
+			// (which is just equivalent to: $(context).find(expr)
+			} else {
+				return this.constructor( context ).find( selector );
+			}
+
+		// HANDLE: $(DOMElement)
+		} else if ( selector.nodeType ) {
+			this[ 0 ] = selector;
+			this.length = 1;
+			return this;
+
+		// HANDLE: $(function)
+		// Shortcut for document ready
+		} else if ( isFunction( selector ) ) {
+			return root.ready !== undefined ?
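// Sketch of the $(html, props) branch handled earlier in init (values
// invented): plain-object keys invoke a same-named method when one exists and
// fall back to attributes otherwise:
//
//     jQuery( "<div/>", { "class": "box", text: "hi" } );
//     // -> this.attr( "class", "box" )  (no .class() method)
//     // -> this.text( "hi" )            (method exists)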
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
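// The traversal map below is driven by the dir()/sibling() helpers defined
// earlier; a shape sketch:
//
//     dir( elem, "parentNode" )       // all ancestors, closest first
//     dir( elem, "nextSibling" )      // all following element siblings
//     sibling( elem, "nextSibling" )  // just the adjacent element sibling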
parent : null;
+	},
+	parents: function( elem ) {
+		return dir( elem, "parentNode" );
+	},
+	parentsUntil: function( elem, _i, until ) {
+		return dir( elem, "parentNode", until );
+	},
+	next: function( elem ) {
+		return sibling( elem, "nextSibling" );
+	},
+	prev: function( elem ) {
+		return sibling( elem, "previousSibling" );
+	},
+	nextAll: function( elem ) {
+		return dir( elem, "nextSibling" );
+	},
+	prevAll: function( elem ) {
+		return dir( elem, "previousSibling" );
+	},
+	nextUntil: function( elem, _i, until ) {
+		return dir( elem, "nextSibling", until );
+	},
+	prevUntil: function( elem, _i, until ) {
+		return dir( elem, "previousSibling", until );
+	},
+	siblings: function( elem ) {
+		return siblings( ( elem.parentNode || {} ).firstChild, elem );
+	},
+	children: function( elem ) {
+		return siblings( elem.firstChild );
+	},
+	contents: function( elem ) {
+		if ( elem.contentDocument != null &&
+
+			// Support: IE 11+
+			// <object> elements with no `data` attribute has an object
+			// `contentDocument` with a `null` prototype.
+			getProto( elem.contentDocument ) ) {
+
+			return elem.contentDocument;
+		}
+
+		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
+		// Treat the template element as a regular one in browsers that
+		// don't support it.
+		if ( nodeName( elem, "template" ) ) {
+			elem = elem.content || elem;
+		}
+
+		return jQuery.merge( [], elem.childNodes );
+	}
+}, function( name, fn ) {
+	jQuery.fn[ name ] = function( until, selector ) {
+		var matched = jQuery.map( this, fn, until );
+
+		if ( name.slice( -5 ) !== "Until" ) {
+			selector = until;
+		}
+
+		if ( selector && typeof selector === "string" ) {
+			matched = jQuery.filter( selector, matched );
+		}
+
+		if ( this.length > 1 ) {
+
+			// Remove duplicates
+			if ( !guaranteedUnique[ name ] ) {
+				jQuery.uniqueSort( matched );
+			}
+
+			// Reverse order for parents* and prev-derivatives
+			if ( rparentsprev.test( name ) ) {
+				matched.reverse();
+			}
+		}
+
+		return this.pushStack( matched );
+	};
+} );
+var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
+
+
+
+// Convert String-formatted options into Object-formatted ones
+function createOptions( options ) {
+	var object = {};
+	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
+		object[ flag ] = true;
+	} );
+	return object;
+}
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ *	options: an optional list of space-separated options that will change how
+ *			the callback list behaves or a more traditional option object
+ *
+ *	By default a callback list will act like an event callback list and can be
+ *	"fired" multiple times.
+ *
+ *	Possible options:
+ *
+ *	once:			will ensure the callback list can only be fired once (like a Deferred)
+ *
+ *	memory:			will keep track of previous values and will call any callback added
+ *					after the list has been fired right away with the latest "memorized"
+ *					values (like a Deferred)
+ *
+ *	unique:			will ensure a callback can only be added once (no duplicate in the list)
+ *
+ *	stopOnFalse:	interrupt callings when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+	// Convert options from String-formatted to Object-formatted if needed
+	// (we check in cache first)
+	options = typeof options === "string" ?
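// A usage sketch of the option flags documented above (values invented):
//
//     var list = jQuery.Callbacks( "once memory" );
//     list.add( function( v ) { console.log( "first:", v ); } );
//     list.fire( 42 );  // logs "first: 42", then locks further fires
//     list.add( function( v ) { console.log( "late:", v ); } );
//                       // logs "late: 42" at once, thanks to "memory"
//     list.fire( 99 );  // ignored: "once" permits a single fire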
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
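+// An illustrative sketch (not part of the library source): a handler that
+// throws one of these still rejects the chained promise, but the hook below
+// additionally warns on the console right away:
+//
+//	jQuery.Deferred().resolve().then( function() {
+//		missingFn();                 // hypothetical; ReferenceError is warned, then rejected
+//	} ).catch( function( err ) {
+//		console.log( "caught", err );
+//	} );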
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
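+	// An illustrative contrast between the two stores (el is a hypothetical
+	// DOM element): user data set through the public API and private data
+	// never collide:
+	//
+	//	jQuery.data( el, "k", 1 );    // dataUser, visible to .data()
+	//	jQuery._data( el, "k", 2 );   // dataPriv, hidden from public getters
+	//	jQuery.data( el, "k" );       // -> 1, unaffected by the private write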
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
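+			// e.g. ( illustrative ): jQuery( "#box" ).data( "mode", "dark" )
+			// takes this branch with key = "mode", value = "dark" and writes
+			// it to every matched element below.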
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+	// Support: IE <=9 only
+	// IE <=9 replaces <option> tags with their contents when inserted outside of
+	// the select element.
+	div.innerHTML = "<option></option>";
+	support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
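+				// e.g. ( illustrative ): .trigger( "click.menu" ) runs handlers
+				// bound as "click.menu" or "click.menu.extra" but skips plain
+				// "click" handlers; a bare .trigger( "click" ) runs them all.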
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
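+			// ( Illustrative: for a checkbox input this routes the first "click"
+			// handler through leverageNative so that .trigger( "click" ) handlers
+			// observe the same checked state as a real user click. )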
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. 
If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. + return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). 
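+// An illustrative sketch of what this mapping enables (hypothetical #menu
+// markup): mouseenter does not bubble natively, but mapping it onto
+// mouseover lets delegated handlers work:
+//
+//	jQuery( "#menu" ).on( "mouseenter", "li", function() {
+//		jQuery( this ).addClass( "open" );    // runs once per entered li
+//	} );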
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event ) dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.get( src );
+		events = pdataOld.events;
+
+		if ( events ) {
+			dataPriv.remove( dest, "handle events" );
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2.
Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. + trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
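+		// A usage sketch ( hypothetical property name ): custom properties
+		// keep their literal name; the setter path below uses
+		// style.setProperty() and the getter path ( via curCSS ) uses
+		// getPropertyValue(), e.g.:
+		//
+		//   jQuery( elem ).css( "--main-color", "#336699" ); // set
+		//   jQuery( elem ).css( "--main-color" );            // get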
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
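+					// Note: when the test below passes, swap() temporarily
+					// applies cssShow ( position:absolute, visibility:hidden,
+					// display:block ) so that, e.g., a hypothetical
+					// jQuery( "#hidden" ).width() on a display:none element
+					// still measures a usable dimension.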
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
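+		// For instance, a hypothetical jQuery( "#panel" ).slideDown()
+		// animates height/margin/padding, so isBox is true and this branch
+		// runs; the three values recorded below are restored by the
+		// anim.always() callback further down.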
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? 
+ jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } 
); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev 
can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + +
\ No newline at end of file
diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html
new file mode 100644
index 00000000000..d4238bb08c3
--- /dev/null
+++ b/core/dbt/docs/build/html/index.html
@@ -0,0 +1,855 @@
dbt-core’s API documentation

Command: build

defer
    Type: boolean
    If set, defer to the state variable for resolving unselected nodes.

exclude
    Type: string
    Specify the nodes to exclude.

fail_fast
    Type: boolean
    Stop execution on first failure.

full_refresh
    Type: boolean
    If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.

indirect_selection
    Type: choice: [‘eager’, ‘cautious’]
    Select all tests that are adjacent to selected resources, even if they were not explicitly selected.

log_path
    Type: path
    Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

models
    Type: string
    Specify the nodes to include.

profile
    Type: string
    Which profile to load. Overrides setting in dbt_project.yml.

profiles_dir
    Type: path
    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

project_dir
    Type: path
    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

selector
    Type: string
    The selector name to use, as defined in selectors.yml

show
    Type: boolean
    Show a sample of the loaded data in the terminal.

state
    Type: path
    If set, use the given directory as the source for json files to compare with this project.

store_failures
    Type: boolean
    Store test results (failing rows) in the database.

target
    Type: string
    Which target to load for the given profile.

target_path
    Type: path
    Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

threads
    Type: int
    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

vars
    Type: YAML
    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. ‘{my_variable: my_value}’

version_check
    Type: boolean
    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’).
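These parameters map onto command-line flags (by convention, underscores become dashes; that spelling is an assumption here, not something this reference states). As a minimal sketch of driving the new Click entry point in-process rather than through the dbt executable, assuming dbt.cli.main.cli is importable:

    # A minimal sketch, not a documented dbt API: invoke the Click group
    # added in this patch in-process. standalone_mode=False makes Click
    # return to the caller instead of calling sys.exit().
    from dbt.cli.main import cli

    cli(
        [
            "build",
            "--fail-fast",                        # fail_fast: stop on first failure
            "--threads", "4",                     # threads: int
            "--vars", "{my_variable: my_value}",  # vars: a YAML string
        ],
        standalone_mode=False,
    )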

Command: clean

profile
    Type: string
    Which profile to load. Overrides setting in dbt_project.yml.

profiles_dir
    Type: path
    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

project_dir
    Type: path
    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

target
    Type: string
    Which target to load for the given profile.

vars
    Type: YAML
    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. ‘{my_variable: my_value}’

+Command: compile
+
+defer
+    Type: boolean
+    If set, defer to the state variable for resolving unselected nodes.
+
+exclude
+    Type: string
+    Specify the nodes to exclude.
+
+full_refresh
+    Type: boolean
+    If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
+
+log_path
+    Type: path
+    Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.
+
+models
+    Type: string
+    Specify the nodes to include.
+
+parse_only
+    Type: boolean
+    TODO: No help text currently available
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+selector
+    Type: string
+    The selector name to use, as defined in selectors.yml
+
+state
+    Type: path
+    If set, use the given directory as the source for json files to compare with this project.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+target_path
+    Type: path
+    Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.
+
+threads
+    Type: int
+    Specify the number of threads to use while executing models. Overrides settings in profiles.yml.
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+version_check
+    Type: boolean
+    Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')
+
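A toy illustration of what the state flag above enables: both the state directory and target/ hold a manifest.json artifact, and selectors such as state:modified compare the two. This is a simplification of dbt's real comparison (which looks at more than checksums), and the paths are hypothetical:

```python
import json
from pathlib import Path

# A manifest produced by a previous run (--state prod-artifacts) and
# the current one; both paths are hypothetical.
previous = json.loads(Path("prod-artifacts/manifest.json").read_text())
current = json.loads(Path("target/manifest.json").read_text())

# Nodes whose checksum changed would be picked up by a state:modified
# selector; new nodes (absent from the old manifest) also count.
modified = [
    uid
    for uid, node in current["nodes"].items()
    if node.get("checksum") != previous["nodes"].get(uid, {}).get("checksum")
]
print(modified)
```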

+Command: debug
+
+config_dir
+    Type: string
+    If specified, dbt will show path information for this project
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+version_check
+    Type: boolean
+    Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')
+

+Command: deps
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+

+Command: docs
+

+Command: init
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+skip_profile_setup
+    Type: boolean
+    Skip interactive profile setup.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+

+Command: list
+
+exclude
+    Type: string
+    Specify the nodes to exclude.
+
+indirect_selection
+    Type: choice: ['eager', 'cautious']
+    Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.
+
+models
+    Type: string
+    Specify the nodes to include.
+
+output
+    Type: choice: ['json', 'name', 'path', 'selector']
+    TODO: No current help text
+
+output_keys
+    Type: string
+    TODO: No current help text
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+resource_type
+    Type: choice: ['metric', 'source', 'analysis', 'model', 'test', 'exposure', 'snapshot', 'seed', 'default', 'all']
+    TODO: No current help text
+
+selector
+    Type: string
+    The selector name to use, as defined in selectors.yml
+
+state
+    Type: path
+    If set, use the given directory as the source for json files to compare with this project.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
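Because the list command can emit JSON, its output is straightforward to consume programmatically. A minimal sketch, assuming dbt is installed on PATH and the command is run inside a project (the line filter is a crude way to skip any non-JSON log output):

```python
import json
import subprocess

# Ask dbt to list the selected nodes, one JSON object per line.
proc = subprocess.run(
    ["dbt", "list", "--output", "json"],
    capture_output=True,
    text=True,
    check=True,
)

nodes = [
    json.loads(line)
    for line in proc.stdout.splitlines()
    if line.startswith("{")
]
print(len(nodes), "nodes selected")
```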

+Command: parse
+
+compile
+    Type: boolean
+    TODO: No help text currently available
+
+log_path
+    Type: path
+    Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+target_path
+    Type: path
+    Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.
+
+threads
+    Type: int
+    Specify the number of threads to use while executing models. Overrides settings in profiles.yml.
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+version_check
+    Type: boolean
+    Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')
+
+write_manifest
+    Type: boolean
+    TODO: No help text currently available
+

+Command: run
+
+defer
+    Type: boolean
+    If set, defer to the state variable for resolving unselected nodes.
+
+exclude
+    Type: string
+    Specify the nodes to exclude.
+
+fail_fast
+    Type: boolean
+    Stop execution on first failure.
+
+full_refresh
+    Type: boolean
+    If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
+
+log_path
+    Type: path
+    Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.
+
+models
+    Type: string
+    Specify the nodes to include.
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+selector
+    Type: string
+    The selector name to use, as defined in selectors.yml
+
+state
+    Type: path
+    If set, use the given directory as the source for json files to compare with this project.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+target_path
+    Type: path
+    Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.
+
+threads
+    Type: int
+    Specify the number of threads to use while executing models. Overrides settings in profiles.yml.
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+version_check
+    Type: boolean
+    Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')
+
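For intuition about the threads flag above: dbt executes mutually independent nodes concurrently on a worker pool of that size. A deliberately simplified sketch that ignores DAG ordering (the model names are made up):

```python
from concurrent.futures import ThreadPoolExecutor

def run_node(name: str) -> str:
    # Stand-in for executing one model against the warehouse
    return f"ran {name}"

models = ["stg_orders", "stg_customers", "fct_orders"]  # hypothetical

# --threads 4 corresponds roughly to a pool of four workers
with ThreadPoolExecutor(max_workers=4) as pool:
    for result in pool.map(run_node, models):
        print(result)
```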

+Command: run_operation
+
+args
+    Type: YAML
+    Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
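To make the args mapping concrete: the YAML string is parsed into a dictionary and applied to the macro's keyword arguments. A sketch with a stand-in Python function in place of a Jinja macro (all names here are hypothetical):

```python
import yaml

def grant_select(role: str = "reporter") -> str:
    # Stand-in for a project macro that accepts keyword arguments
    return f"grant select on schema analytics to {role}"

cli_args = "{role: analyst}"       # as passed via --args
kwargs = yaml.safe_load(cli_args)  # -> {"role": "analyst"}
print(grant_select(**kwargs))
```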

+Command: seed
+
+exclude
+    Type: string
+    Specify the nodes to exclude.
+
+full_refresh
+    Type: boolean
+    If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
+
+log_path
+    Type: path
+    Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.
+
+models
+    Type: string
+    Specify the nodes to include.
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+selector
+    Type: string
+    The selector name to use, as defined in selectors.yml
+
+show
+    Type: boolean
+    Show a sample of the loaded data in the terminal
+
+state
+    Type: path
+    If set, use the given directory as the source for json files to compare with this project.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+target_path
+    Type: path
+    Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.
+
+threads
+    Type: int
+    Specify the number of threads to use while executing models. Overrides settings in profiles.yml.
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+version_check
+    Type: boolean
+    Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')
+

+Command: snapshot
+
+defer
+    Type: boolean
+    If set, defer to the state variable for resolving unselected nodes.
+
+exclude
+    Type: string
+    Specify the nodes to exclude.
+
+models
+    Type: string
+    Specify the nodes to include.
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+selector
+    Type: string
+    The selector name to use, as defined in selectors.yml
+
+state
+    Type: path
+    If set, use the given directory as the source for json files to compare with this project.
+
+target
+    Type: string
+    Which target to load for the given profile
+
+threads
+    Type: int
+    Specify the number of threads to use while executing models. Overrides settings in profiles.yml.
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+

+Command: source
+

+Command: test
+
+defer
+    Type: boolean
+    If set, defer to the state variable for resolving unselected nodes.
+
+exclude
+    Type: string
+    Specify the nodes to exclude.
+
+fail_fast
+    Type: boolean
+    Stop execution on first failure.
+
+indirect_selection
+    Type: choice: ['eager', 'cautious']
+    Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.
+
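The two modes differ on tests with more than one parent. A toy model of the documented semantics (not dbt's implementation):

```python
# A relationships test depends on two models, but only one is selected.
selected = {"model_a"}
test_parents = {"model_a", "model_b"}

# eager: include the test if any of its parents is selected
eager = bool(test_parents & selected)  # True

# cautious: include the test only if all of its parents are selected
cautious = test_parents <= selected    # False
print(eager, cautious)
```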

+log_path
+    Type: path
+    Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.
+
+models
+    Type: string
+    Specify the nodes to include.
+
+profile
+    Type: string
+    Which profile to load. Overrides setting in dbt_project.yml.
+
+profiles_dir
+    Type: path
+    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
+
+project_dir
+    Type: path
+    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
+
+selector
+    Type: string
+    The selector name to use, as defined in selectors.yml
+
+state
+    Type: path
+    If set, use the given directory as the source for json files to compare with this project.
+
+store_failures
+    Type: boolean
+    Store test results (failing rows) in the database
+
+target
+    Type: string
+    Which target to load for the given profile
+
+target_path
+    Type: path
+    Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.
+
+threads
+    Type: int
+    Specify the number of threads to use while executing models. Overrides settings in profiles.yml.
+
+vars
+    Type: YAML
+    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. '{my_variable: my_value}'
+
+version_check
+    Type: boolean
+    Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')
+
(end of generated index.html; footer markup omitted)
diff --git a/core/dbt/docs/build/html/objects.inv b/core/dbt/docs/build/html/objects.inv
new file mode 100644
index 0000000000000000000000000000000000000000..e46f393260842c5dac58a56bf83b50a2e3e25f20
GIT binary patch
(generated Sphinx object inventory; 250-byte binary literal omitted)

diff --git a/core/dbt/docs/build/html/search.html b/core/dbt/docs/build/html/search.html
new file mode 100644
(generated Sphinx search page; boilerplate HTML omitted)
diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js
new file mode 100644
index 00000000000..25dd9fd3af5
--- /dev/null
+++ b/core/dbt/docs/build/html/searchindex.js
@@ -0,0 +1 @@
+(generated Sphinx search index; single-line Search.setIndex(...) blob omitted)
diff --git a/core/dbt/docs/source/conf.py b/core/dbt/docs/source/conf.py
index 17ff44e41a0..d9962bbfc8b 100644
--- a/core/dbt/docs/source/conf.py
+++ b/core/dbt/docs/source/conf.py
@@ -7,7 +7,7 @@
 # For the full list of built-in configuration values, see the documentation:
 # https://www.sphinx-doc.org/en/master/usage/configuration.html

-sys.path.insert(0, os.path.abspath("../.."))
+sys.path.insert(0, os.path.abspath("../../.."))
 sys.path.insert(0, os.path.abspath("./_ext"))

 # -- Project information -----------------------------------------------------
diff --git a/core/dbt/events/README.md b/core/dbt/events/README.md
index cadc59ab126..52edd7d35d4 100644
--- a/core/dbt/events/README.md
+++ b/core/dbt/events/README.md
@@ -8,9 +8,10 @@ The event module provides types that represent what is happening in dbt in `even
 When events are processed via `fire_event`, nearly everything is logged. Whether or not the user has enabled the debug flag, all debug messages are still logged to the file. However, some events are particularly time consuming to construct because they return a huge amount of data. Today, the only messages in this category are cache events and are only logged if the `--log-cache-events` flag is on. This is important because these messages should not be created unless they are going to be logged, because they cause a noticeable performance degradation. These events use the "fire_event_if" functions.

 # Adding a New Event
-New events need to have a proto message definition created in core/dbt/events/types.proto. Every message must include EventInfo as the first field, named "info" and numbered 1. To update the proto_types.py file, in the core/dbt/events directory: ```protoc --python_betterproto_out . types.proto```
-
-A matching class needs to be created in the core/dbt/events/types.py file, which will have two superclasses, the "Level" mixin and the generated class from proto_types.py. These classes will also generally have two methods, a "code" method that returns the event code, and a "message" method that is used to construct the "msg" from the event fields. In addition the "Level" mixin will provide a "level_tag" method to set the level (which can also be overridden using the "info" convenience function from functions.py)
+* Add a new message in types.proto with an EventInfo field first
+* Run the protoc compiler to update proto_types.py: ```protoc --python_betterproto_out . types.proto```
+* Add a wrapping class in core/dbt/events/types.py with a Level superclass and the superclass from proto_types.py, plus code and message methods
+* Add the class to tests/unit/test_events.py

 Note that no attributes can exist in these event classes except for fields defined in the protobuf definitions, because the betterproto metaclass will throw an error. Betterproto provides a to_dict() method to convert the generated classes to a dictionary and from that to json. However some attributes will successfully convert to dictionaries but not to serialized protobufs, so we need to test both output formats.
diff --git a/core/dbt/events/adapter_endpoint.py b/core/dbt/events/adapter_endpoint.py index 68a73d8aecb..c26ac376437 100644 --- a/core/dbt/events/adapter_endpoint.py +++ b/core/dbt/events/adapter_endpoint.py @@ -1,6 +1,7 @@ import traceback from dataclasses import dataclass from dbt.events.functions import fire_event +from dbt.events.contextvars import get_node_info from dbt.events.types import ( AdapterEventDebug, AdapterEventInfo, @@ -15,27 +16,39 @@ class AdapterLogger: name: str def debug(self, msg, *args): - event = AdapterEventDebug(name=self.name, base_msg=msg, args=args) + event = AdapterEventDebug( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def info(self, msg, *args): - event = AdapterEventInfo(name=self.name, base_msg=msg, args=args) + event = AdapterEventInfo( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def warning(self, msg, *args): - event = AdapterEventWarning(name=self.name, base_msg=msg, args=args) + event = AdapterEventWarning( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def error(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) # The default exc_info=True is what makes this method different def exception(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) event.exc_info = traceback.format_exc() fire_event(event) def critical(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index cd3275c02a9..db74016099a 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +from enum import Enum import os import threading from datetime import datetime @@ -43,13 +44,25 @@ def get_thread_name() -> str: return threading.current_thread().name +# EventLevel is an Enum, but mixing in the 'str' type is suggested in the Python +# documentation, and provides support for json conversion, which fails otherwise. +class EventLevel(str, Enum): + DEBUG = "debug" + TEST = "test" + INFO = "info" + WARN = "warn" + ERROR = "error" + + @dataclass class BaseEvent: """BaseEvent for proto message generated python events""" def __post_init__(self): super().__post_init__() - self.info.level = self.level_tag() + if not self.info.level: + self.info.level = self.level_tag() + assert self.info.level in ["info", "warn", "error", "debug", "test"] if not hasattr(self.info, "msg") or not self.info.msg: self.info.msg = self.message() self.info.invocation_id = get_invocation_id() @@ -60,43 +73,55 @@ def __post_init__(self): self.info.code = self.code() self.info.name = type(self).__name__ - def level_tag(self): - raise Exception("level_tag() not implemented for event") + # This is here because although we know that info should always + # exist, mypy doesn't. 
+ def log_level(self) -> EventLevel: + return self.info.level # type: ignore - def message(self): + def level_tag(self) -> EventLevel: + return EventLevel.DEBUG + + def message(self) -> str: raise Exception("message() not implemented for event") +# DynamicLevel requires that the level be supplied on the +# event construction call using the "info" function from functions.py +@dataclass # type: ignore[misc] +class DynamicLevel(BaseEvent): + pass + + @dataclass class TestLevel(BaseEvent): __test__ = False - def level_tag(self) -> str: - return "test" + def level_tag(self) -> EventLevel: + return EventLevel.TEST @dataclass # type: ignore[misc] class DebugLevel(BaseEvent): - def level_tag(self) -> str: - return "debug" + def level_tag(self) -> EventLevel: + return EventLevel.DEBUG @dataclass # type: ignore[misc] class InfoLevel(BaseEvent): - def level_tag(self) -> str: - return "info" + def level_tag(self) -> EventLevel: + return EventLevel.INFO @dataclass # type: ignore[misc] class WarnLevel(BaseEvent): - def level_tag(self) -> str: - return "warn" + def level_tag(self) -> EventLevel: + return EventLevel.WARN @dataclass # type: ignore[misc] class ErrorLevel(BaseEvent): - def level_tag(self) -> str: - return "error" + def level_tag(self) -> EventLevel: + return EventLevel.ERROR # Included to ensure classes with str-type message members are initialized correctly. diff --git a/core/dbt/events/contextvars.py b/core/dbt/events/contextvars.py new file mode 100644 index 00000000000..4aa507eb29b --- /dev/null +++ b/core/dbt/events/contextvars.py @@ -0,0 +1,84 @@ +import contextlib +import contextvars + +from typing import Any, Generator, Mapping, Dict +from dbt.events.proto_types import NodeInfo + + +LOG_PREFIX = "log_" +LOG_PREFIX_LEN = len(LOG_PREFIX) + +_log_context_vars: Dict[str, contextvars.ContextVar] = {} + + +def get_contextvars() -> Dict[str, Any]: + rv = {} + ctx = contextvars.copy_context() + + for k in ctx: + if k.name.startswith(LOG_PREFIX) and ctx[k] is not Ellipsis: + rv[k.name[LOG_PREFIX_LEN:]] = ctx[k] + + return rv + + +def get_node_info(): + cvars = get_contextvars() + if "node_info" in cvars: + return cvars["node_info"] + else: + return NodeInfo() + + +def clear_contextvars() -> None: + ctx = contextvars.copy_context() + for k in ctx: + if k.name.startswith(LOG_PREFIX): + k.set(Ellipsis) + + +# put keys and values into context. 
Returns the contextvar.Token mapping
+# Save and pass to reset_contextvars
+def set_contextvars(**kwargs: Any) -> Mapping[str, contextvars.Token]:
+    cvar_tokens = {}
+    for k, v in kwargs.items():
+        log_key = f"{LOG_PREFIX}{k}"
+        try:
+            var = _log_context_vars[log_key]
+        except KeyError:
+            var = contextvars.ContextVar(log_key, default=Ellipsis)
+            _log_context_vars[log_key] = var
+
+        cvar_tokens[k] = var.set(v)
+
+    return cvar_tokens
+
+
+# reset by Tokens
+def reset_contextvars(**kwargs: contextvars.Token) -> None:
+    for k, v in kwargs.items():
+        log_key = f"{LOG_PREFIX}{k}"
+        var = _log_context_vars[log_key]
+        var.reset(v)
+
+
+# remove from contextvars
+def unset_contextvars(*keys: str) -> None:
+    for k in keys:
+        if k in _log_context_vars:
+            log_key = f"{LOG_PREFIX}{k}"
+            _log_context_vars[log_key].set(Ellipsis)
+
+
+# Context manager or decorator to set and unset the context vars
+@contextlib.contextmanager
+def log_contextvars(**kwargs: Any) -> Generator[None, None, None]:
+    context = get_contextvars()
+    saved = {k: context[k] for k in context.keys() & kwargs.keys()}
+
+    set_contextvars(**kwargs)
+    try:
+        yield
+    finally:
+        unset_contextvars(*kwargs.keys())
+        set_contextvars(**saved)
diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py
new file mode 100644
index 00000000000..97a7d5d4360
--- /dev/null
+++ b/core/dbt/events/eventmgr.py
@@ -0,0 +1,212 @@
+from colorama import Style
+from dataclasses import dataclass
+from datetime import datetime
+from enum import Enum
+import json
+import logging
+from logging.handlers import RotatingFileHandler
+import threading
+from typing import Any, Callable, List, Optional, TextIO
+from uuid import uuid4
+
+from dbt.events.base_types import BaseEvent, EventLevel
+
+
+# A Filter is a function which takes a BaseEvent and returns True if the event
+# should be logged, False otherwise.
+Filter = Callable[[BaseEvent], bool]
+
+
+# Default filter which logs every event
+def NoFilter(_: BaseEvent) -> bool:
+    return True
+
+
+# A Scrubber removes secrets from an input string, returning a sanitized string.
+Scrubber = Callable[[str], str]
+
+
+# Provide a pass-through scrubber implementation, also used as a default
+def NoScrubber(s: str) -> str:
+    return s
+
+
+class LineFormat(Enum):
+    PlainText = 1
+    DebugText = 2
+    Json = 3
+
+
+# Map from dbt event levels to python log levels
+_log_level_map = {
+    EventLevel.DEBUG: 10,
+    EventLevel.TEST: 10,
+    EventLevel.INFO: 20,
+    EventLevel.WARN: 30,
+    EventLevel.ERROR: 40,
+}
+
+
+# We should consider fixing the problem, but log_level() can return a string for
+# DynamicLevel events, even though it is supposed to return an EventLevel. This
+# function gets a string for the level, no matter what.
+def _get_level_str(e: BaseEvent) -> str:
+    return e.log_level().value if isinstance(e.log_level(), EventLevel) else str(e.log_level())
+
+
+# We need this function for now because the numeric log severity levels in
+# Python do not match those for logbook, so we have to explicitly call the
+# correct function by name.
+def send_to_logger(l, level: str, log_line: str): + if level == "test": + l.debug(log_line) + elif level == "debug": + l.debug(log_line) + elif level == "info": + l.info(log_line) + elif level == "warn": + l.warning(log_line) + elif level == "error": + l.error(log_line) + else: + raise AssertionError( + f"While attempting to log {log_line}, encountered the unhandled level: {level}" + ) + + +@dataclass +class LoggerConfig: + name: str + filter: Filter = NoFilter + scrubber: Scrubber = NoScrubber + line_format: LineFormat = LineFormat.PlainText + level: EventLevel = EventLevel.WARN + use_colors: bool = False + output_stream: Optional[TextIO] = None + output_file_name: Optional[str] = None + logger: Optional[Any] = None + + +class _Logger: + def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: + self.name: str = config.name + self.filter: Filter = config.filter + self.scrubber: Scrubber = config.scrubber + self.level: EventLevel = config.level + self.event_manager: EventManager = event_manager + self._python_logger: Optional[logging.Logger] = config.logger + self._stream: Optional[TextIO] = config.output_stream + + if config.output_file_name: + log = logging.getLogger(config.name) + log.setLevel(_log_level_map[config.level]) + handler = RotatingFileHandler( + filename=str(config.output_file_name), + encoding="utf8", + maxBytes=10 * 1024 * 1024, # 10 mb + backupCount=5, + ) + + handler.setFormatter(logging.Formatter(fmt="%(message)s")) + log.handlers.clear() + log.addHandler(handler) + + self._python_logger = log + + def create_line(self, e: BaseEvent) -> str: + raise NotImplementedError() + + def write_line(self, e: BaseEvent): + line = self.create_line(e) + python_level = _log_level_map[e.log_level()] + if self._python_logger is not None: + send_to_logger(self._python_logger, _get_level_str(e), line) + elif self._stream is not None and _log_level_map[self.level] <= python_level: + self._stream.write(line + "\n") + + def flush(self): + if self._python_logger is not None: + for handler in self._python_logger.handlers: + handler.flush() + elif self._stream is not None: + self._stream.flush() + + +class _TextLogger(_Logger): + def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: + super().__init__(event_manager, config) + self.use_colors = config.use_colors + self.use_debug_format = config.line_format == LineFormat.DebugText + + def create_line(self, e: BaseEvent) -> str: + return self.create_debug_line(e) if self.use_debug_format else self.create_info_line(e) + + def create_info_line(self, e: BaseEvent) -> str: + ts: str = datetime.utcnow().strftime("%H:%M:%S") + scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + return f"{self._get_color_tag()}{ts} {scrubbed_msg}" + + def create_debug_line(self, e: BaseEvent) -> str: + log_line: str = "" + # Create a separator if this is the beginning of an invocation + # TODO: This is an ugly hack, get rid of it if we can + if type(e).__name__ == "MainReportVersion": + separator = 30 * "=" + log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" + ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") + scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + level = _get_level_str(e) + log_line += ( + f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}" + ) + return log_line + + def _get_color_tag(self) -> str: + return "" if not self.use_colors else Style.RESET_ALL + + def _get_thread_name(self) -> str: + 
thread_name = ""
+        if threading.current_thread().name:
+            thread_name = threading.current_thread().name
+            thread_name = thread_name[:10]
+            thread_name = thread_name.ljust(10, " ")
+            thread_name = f" [{thread_name}]:"
+        return thread_name
+
+
+class _JsonLogger(_Logger):
+    def create_line(self, e: BaseEvent) -> str:
+        from dbt.events.functions import event_to_dict
+
+        event_dict = event_to_dict(e)
+        raw_log_line = json.dumps(event_dict, sort_keys=True)
+        line = self.scrubber(raw_log_line)  # type: ignore
+        return line
+
+
+class EventManager:
+    def __init__(self) -> None:
+        self.loggers: List[_Logger] = []
+        self.callbacks: List[Callable[[BaseEvent], None]] = []
+        self.invocation_id: str = str(uuid4())
+
+    def fire_event(self, e: BaseEvent) -> None:
+        for logger in self.loggers:
+            if logger.filter(e):  # type: ignore
+                logger.write_line(e)
+
+        for callback in self.callbacks:
+            callback(e)
+
+    def add_logger(self, config: LoggerConfig):
+        logger = (
+            _JsonLogger(self, config)
+            if config.line_format == LineFormat.Json
+            else _TextLogger(self, config)
+        )
+        logger.event_manager = self
+        self.loggers.append(logger)
+
+    def flush(self):
+        for logger in self.loggers:
+            logger.flush()
diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py
index 7a652a998f6..ff5b267bc5e 100644
--- a/core/dbt/events/functions.py
+++ b/core/dbt/events/functions.py
@@ -1,125 +1,151 @@
 import betterproto
-import io
+from dbt.constants import METADATA_ENV_PREFIX
+from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut
+from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter
+from dbt.events.helpers import env_secrets, scrub_secrets
+from dbt.events.proto_types import EventInfo
+from dbt.events.types import EmptyLine
+import dbt.flags as flags
+from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing
+from functools import partial
 import json
-import logging
 import os
 import sys
-import threading
+from typing import Callable, Dict, Optional, TextIO
 import uuid
-from collections import deque
-from datetime import datetime
-from io import StringIO, TextIOWrapper
-from logging import Logger
-from logging.handlers import RotatingFileHandler
-from typing import Callable, Dict, List, Optional, Union
-import dbt.flags as flags
-import logbook
-from colorama import Style
-from dbt.constants import METADATA_ENV_PREFIX, SECRET_ENV_PREFIX
-from dbt.events.base_types import BaseEvent, Cache, NoFile, NoStdOut
-from dbt.events.types import EmptyLine, EventBufferFull, MainReportVersion
-from dbt.logger import make_log_dir_if_missing
-# create the module-globals
-LOG_VERSION = 2
-EVENT_HISTORY = None
+LOG_VERSION = 3
+metadata_vars: Optional[Dict[str, str]] = None
-DEFAULT_FILE_LOGGER_NAME = "default_file"
-FILE_LOG = logging.getLogger(DEFAULT_FILE_LOGGER_NAME)
-DEFAULT_STDOUT_LOGGER_NAME = "default_std_out"
-STDOUT_LOG = logging.getLogger(DEFAULT_STDOUT_LOGGER_NAME)
+def setup_event_logger(log_path: str, log_format: str, use_colors: bool, debug: bool):
+    cleanup_event_logger()
+    make_log_dir_if_missing(log_path)
-invocation_id: Optional[str] = None
-metadata_vars: Optional[Dict[str, str]] = None
+    if flags.ENABLE_LEGACY_LOGGER:
+        EVENT_MANAGER.add_logger(_get_logbook_log_config(debug))
+    else:
+        EVENT_MANAGER.add_logger(_get_stdout_config(log_format, debug, use_colors))
+
+    if _CAPTURE_STREAM:
+        # Create second stdout logger to support tests which want to know what's
+        # being sent to stdout.
+ # debug here is true because we need to capture debug events, and we pass in false in main + capture_config = _get_stdout_config(log_format, True, use_colors) + capture_config.output_stream = _CAPTURE_STREAM + EVENT_MANAGER.add_logger(capture_config) + + # create and add the file logger to the event manager + EVENT_MANAGER.add_logger( + _get_logfile_config(os.path.join(log_path, "dbt.log"), use_colors, log_format) + ) -def setup_event_logger(log_path, log_format, use_colors, debug): - global FILE_LOG - global STDOUT_LOG +def _get_stdout_config(log_format: str, debug: bool, use_colors: bool) -> LoggerConfig: + fmt = LineFormat.PlainText + if log_format == "json": + fmt = LineFormat.Json + elif debug: + fmt = LineFormat.DebugText + level = EventLevel.DEBUG if debug else EventLevel.INFO + + return LoggerConfig( + name="stdout_log", + level=level, + use_colors=use_colors, + line_format=fmt, + scrubber=env_scrubber, + filter=partial( + _stdout_filter, bool(flags.LOG_CACHE_EVENTS), debug, bool(flags.QUIET), log_format + ), + output_stream=sys.stdout, + ) - make_log_dir_if_missing(log_path) - # TODO this default should live somewhere better - log_dest = os.path.join(log_path, "dbt.log") - level = logging.DEBUG if debug else logging.INFO - - # overwrite the STDOUT_LOG logger with the configured one - STDOUT_LOG = logging.getLogger("configured_std_out") - STDOUT_LOG.setLevel(level) - setattr(STDOUT_LOG, "format_json", log_format == "json") - setattr(STDOUT_LOG, "format_color", True if use_colors else False) - - FORMAT = "%(message)s" - stdout_passthrough_formatter = logging.Formatter(fmt=FORMAT) - - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setFormatter(stdout_passthrough_formatter) - stdout_handler.setLevel(level) - # clear existing stdout TextIOWrapper stream handlers - STDOUT_LOG.handlers = [ - h - for h in STDOUT_LOG.handlers - if not (hasattr(h, "stream") and isinstance(h.stream, TextIOWrapper)) # type: ignore - ] - STDOUT_LOG.addHandler(stdout_handler) - - # overwrite the FILE_LOG logger with the configured one - FILE_LOG = logging.getLogger("configured_file") - FILE_LOG.setLevel(logging.DEBUG) # always debug regardless of user input - setattr(FILE_LOG, "format_json", log_format == "json") - setattr(FILE_LOG, "format_color", True if use_colors else False) - - file_passthrough_formatter = logging.Formatter(fmt=FORMAT) - - file_handler = RotatingFileHandler( - filename=log_dest, encoding="utf8", maxBytes=10 * 1024 * 1024, backupCount=5 # 10 mb +def _stdout_filter( + log_cache_events: bool, debug_mode: bool, quiet_mode: bool, log_format: str, evt: BaseEvent +) -> bool: + return ( + not isinstance(evt, NoStdOut) + and (not isinstance(evt, Cache) or log_cache_events) + and (evt.log_level() != EventLevel.DEBUG or debug_mode) + and (evt.log_level() == EventLevel.ERROR or not quiet_mode) + and not (log_format == "json" and type(evt) == EmptyLine) ) - file_handler.setFormatter(file_passthrough_formatter) - file_handler.setLevel(logging.DEBUG) # always debug regardless of user input - FILE_LOG.handlers.clear() - FILE_LOG.addHandler(file_handler) -# used for integration tests -def capture_stdout_logs() -> StringIO: - global STDOUT_LOG - capture_buf = io.StringIO() - stdout_capture_handler = logging.StreamHandler(capture_buf) - stdout_capture_handler.setLevel(logging.DEBUG) - STDOUT_LOG.addHandler(stdout_capture_handler) - return capture_buf +def _get_logfile_config(log_path: str, use_colors: bool, log_format: str) -> LoggerConfig: + return LoggerConfig( + name="file_log", + 
line_format=LineFormat.Json if log_format == "json" else LineFormat.DebugText, + use_colors=use_colors, + level=EventLevel.DEBUG, # File log is *always* debug level + scrubber=env_scrubber, + filter=partial(_logfile_filter, bool(flags.LOG_CACHE_EVENTS), log_format), + output_file_name=log_path, + ) -# used for integration tests -def stop_capture_stdout_logs() -> None: - global STDOUT_LOG - STDOUT_LOG.handlers = [ - h - for h in STDOUT_LOG.handlers - if not (hasattr(h, "stream") and isinstance(h.stream, StringIO)) # type: ignore - ] +def _logfile_filter(log_cache_events: bool, log_format: str, evt: BaseEvent) -> bool: + return ( + not isinstance(evt, NoFile) + and not (isinstance(evt, Cache) and not log_cache_events) + and not (log_format == "json" and type(evt) == EmptyLine) + ) + + +def _get_logbook_log_config(debug: bool) -> LoggerConfig: + # use the default one since this code should be removed when we remove logbook + config = _get_stdout_config("", debug, bool(flags.USE_COLORS)) + config.name = "logbook_log" + config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache) + config.logger = GLOBAL_LOGGER + return config + + +def env_scrubber(msg: str) -> str: + return scrub_secrets(msg, env_secrets()) -def env_secrets() -> List[str]: - return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] +def cleanup_event_logger(): + # Reset to a no-op manager to release streams associated with logs. This is + # especially important for tests, since pytest replaces the stdout stream + # during test runs, and closes the stream after the test is over. + EVENT_MANAGER.loggers.clear() + EVENT_MANAGER.callbacks.clear() -def scrub_secrets(msg: str, secrets: List[str]) -> str: - scrubbed = msg +# Since dbt-rpc does not do its own log setup, and since some events can +# currently fire before logs can be configured by setup_event_logger(), we +# create a default configuration with default settings and no file output. +EVENT_MANAGER: EventManager = EventManager() +EVENT_MANAGER.add_logger( + _get_logbook_log_config(flags.DEBUG) # type: ignore + if flags.ENABLE_LEGACY_LOGGER + else _get_stdout_config(flags.LOG_FORMAT, flags.DEBUG, flags.USE_COLORS) # type: ignore +) - for secret in secrets: - scrubbed = scrubbed.replace(secret, "*****") +# This global, and the following two functions for capturing stdout logs are +# an unpleasant hack we intend to remove as part of API-ification. The GitHub +# issue #6350 was opened for that work. +_CAPTURE_STREAM: Optional[TextIO] = None - return scrubbed + +# used for integration tests +def capture_stdout_logs(stream: TextIO): + global _CAPTURE_STREAM + _CAPTURE_STREAM = stream + + +def stop_capture_stdout_logs(): + global _CAPTURE_STREAM + _CAPTURE_STREAM = None # returns a dictionary representation of the event fields. # the message may contain secrets which must be scrubbed at the usage site. -def event_to_json( - event: BaseEvent, -) -> str: +def event_to_json(event: BaseEvent) -> str: event_dict = event_to_dict(event) raw_log_line = json.dumps(event_dict, sort_keys=True) return raw_log_line @@ -128,108 +154,24 @@ def event_to_json( def event_to_dict(event: BaseEvent) -> dict: event_dict = dict() try: - # We could use to_json here, but it wouldn't sort the keys. - # The 'to_json' method just does json.dumps on the dict anyway. 
event_dict = event.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore except AttributeError as exc: event_type = type(event).__name__ raise Exception(f"type {event_type} is not serializable. {str(exc)}") + # We don't want an empty NodeInfo in output + if "node_info" in event_dict and event_dict["node_info"]["node_name"] == "": + del event_dict["node_info"] return event_dict -# translates an Event to a completely formatted text-based log line -# type hinting everything as strings so we don't get any unintentional string conversions via str() -def reset_color() -> str: - return Style.RESET_ALL if getattr(STDOUT_LOG, "format_color", False) else "" - - -def create_info_text_log_line(e: BaseEvent) -> str: - color_tag: str = reset_color() - ts: str = get_ts().strftime("%H:%M:%S") # TODO: get this from the event.ts? - scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) - log_line: str = f"{color_tag}{ts} {scrubbed_msg}" - return log_line - - -def create_debug_text_log_line(e: BaseEvent) -> str: - log_line: str = "" - # Create a separator if this is the beginning of an invocation - if type(e) == MainReportVersion: - separator = 30 * "=" - log_line = f"\n\n{separator} {get_ts()} | {get_invocation_id()} {separator}\n" - color_tag: str = reset_color() - ts: str = get_ts().strftime("%H:%M:%S.%f") - scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) - # Make the levels all 5 characters so they line up - level: str = f"{e.level_tag():<5}" - thread = "" - if threading.current_thread().name: - thread_name = threading.current_thread().name - thread_name = thread_name[:10] - thread_name = thread_name.ljust(10, " ") - thread = f" [{thread_name}]:" - log_line = log_line + f"{color_tag}{ts} [{level}]{thread} {scrubbed_msg}" - return log_line - - -# translates an Event to a completely formatted json log line -def create_json_log_line(e: BaseEvent) -> Optional[str]: - if type(e) == EmptyLine: - return None # will not be sent to logger - raw_log_line = event_to_json(e) - return scrub_secrets(raw_log_line, env_secrets()) - - -# calls create_stdout_text_log_line() or create_json_log_line() according to logger config -def create_log_line(e: BaseEvent, file_output=False) -> Optional[str]: - global FILE_LOG - global STDOUT_LOG - - if FILE_LOG.name == DEFAULT_FILE_LOGGER_NAME and STDOUT_LOG.name == DEFAULT_STDOUT_LOGGER_NAME: - - # TODO: This is only necessary because our test framework doesn't correctly set up logging. - # This code should be moved to the test framework when we do CT-XXX (tix # needed) - null_handler = logging.NullHandler() - FILE_LOG.addHandler(null_handler) - setattr(FILE_LOG, "format_json", False) - setattr(FILE_LOG, "format_color", False) - - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setLevel(logging.INFO) - STDOUT_LOG.setLevel(logging.INFO) - STDOUT_LOG.addHandler(stdout_handler) - setattr(STDOUT_LOG, "format_json", False) - setattr(STDOUT_LOG, "format_color", False) - - logger = FILE_LOG if file_output else STDOUT_LOG - if getattr(logger, "format_json"): - return create_json_log_line(e) # json output, both console and file - elif file_output is True or flags.DEBUG: - return create_debug_text_log_line(e) # default file output - else: - return create_info_text_log_line(e) # console output - - -# allows for reuse of this obnoxious if else tree. 
-# do not use for exceptions, it doesn't pass along exc_info, stack_info, or extra -def send_to_logger(l: Union[Logger, logbook.Logger], level_tag: str, log_line: str): - if not log_line: - return - if level_tag == "test": - # TODO after implmenting #3977 send to new test level - l.debug(log_line) - elif level_tag == "debug": - l.debug(log_line) - elif level_tag == "info": - l.info(log_line) - elif level_tag == "warn": - l.warning(log_line) - elif level_tag == "error": - l.error(log_line) +def warn_or_error(event, node=None): + if flags.WARN_ERROR: + # TODO: resolve this circular import when at top + from dbt.exceptions import EventCompilationException + + raise EventCompilationException(event.info.msg, node) else: - raise AssertionError( - f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}" - ) + fire_event(event) # an alternative to fire_event which only creates and logs the event value @@ -244,30 +186,7 @@ def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None: # (i.e. - mutating the event history, printing to stdout, logging # to files, etc.) def fire_event(e: BaseEvent) -> None: - # skip logs when `--log-cache-events` is not passed - if isinstance(e, Cache) and not flags.LOG_CACHE_EVENTS: - return - - add_to_event_history(e) - - # always logs debug level regardless of user input - if not isinstance(e, NoFile): - log_line = create_log_line(e, file_output=True) - # doesn't send exceptions to exception logger - if log_line: - send_to_logger(FILE_LOG, level_tag=e.level_tag(), log_line=log_line) - - if not isinstance(e, NoStdOut): - # explicitly checking the debug flag here so that potentially expensive-to-construct - # log messages are not constructed if debug messages are never shown. - if e.level_tag() == "debug" and not flags.DEBUG: - return # eat the message in case it was one of the expensive ones - if e.level_tag() != "error" and flags.QUIET: - return # eat all non-exception messages in quiet mode - - log_line = create_log_line(e) - if log_line: - send_to_logger(STDOUT_LOG, level_tag=e.level_tag(), log_line=log_line) + EVENT_MANAGER.fire_event(e) def get_metadata_vars() -> Dict[str, str]: @@ -287,44 +206,18 @@ def reset_metadata_vars() -> None: def get_invocation_id() -> str: - global invocation_id - if invocation_id is None: - invocation_id = str(uuid.uuid4()) - return invocation_id + return EVENT_MANAGER.invocation_id def set_invocation_id() -> None: # This is primarily for setting the invocation_id for separate # commands in the dbt servers. It shouldn't be necessary for the CLI. 
- global invocation_id - invocation_id = str(uuid.uuid4()) - - -# exactly one time stamp per concrete event -def get_ts() -> datetime: - ts = datetime.utcnow() - return ts - - -# preformatted time stamp -def get_ts_rfc3339() -> str: - ts = get_ts() - ts_rfc3339 = ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - return ts_rfc3339 - - -def add_to_event_history(event): - if flags.EVENT_BUFFER_SIZE == 0: - return - global EVENT_HISTORY - if EVENT_HISTORY is None: - reset_event_history() - EVENT_HISTORY.append(event) - # We only set the EventBufferFull message for event buffers >= 10,000 - if flags.EVENT_BUFFER_SIZE >= 10000 and len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1): - fire_event(EventBufferFull()) + EVENT_MANAGER.invocation_id = str(uuid.uuid4()) -def reset_event_history(): - global EVENT_HISTORY - EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) +# Currently used to set the level in EventInfo, so logging events can +# provide more than one "level". Might be used in the future to set +# more fields in EventInfo, once some of that information is no longer global +def info(level="info"): + info = EventInfo(level=level) + return info diff --git a/core/dbt/events/helpers.py b/core/dbt/events/helpers.py new file mode 100644 index 00000000000..2570c8653c9 --- /dev/null +++ b/core/dbt/events/helpers.py @@ -0,0 +1,16 @@ +import os +from typing import List +from dbt.constants import SECRET_ENV_PREFIX + + +def env_secrets() -> List[str]: + return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] + + +def scrub_secrets(msg: str, secrets: List[str]) -> str: + scrubbed = msg + + for secret in secrets: + scrubbed = scrubbed.replace(secret, "*****") + + return scrubbed diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index d75713285db..5ee384643d3 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -23,6 +23,7 @@ class EventInfo(betterproto.Message): extra: Dict[str, str] = betterproto.map_field( 9, betterproto.TYPE_STRING, betterproto.TYPE_STRING ) + category: str = betterproto.string_field(10) @dataclass @@ -52,7 +53,6 @@ class NodeInfo(betterproto.Message): class RunResultMsg(betterproto.Message): """RunResult""" - # status: Union[RunStatus, TestStatus, FreshnessStatus] status: str = betterproto.string_field(1) message: str = betterproto.string_field(2) timing_info: List["TimingInfoMsg"] = betterproto.message_field(3) @@ -281,14 +281,74 @@ class ProjectCreated(betterproto.Message): slack_url: str = betterproto.string_field(4) +@dataclass +class PackageRedirectDeprecation(betterproto.Message): + """D001""" + + info: "EventInfo" = betterproto.message_field(1) + old_name: str = betterproto.string_field(2) + new_name: str = betterproto.string_field(3) + + +@dataclass +class PackageInstallPathDeprecation(betterproto.Message): + """D002""" + + info: "EventInfo" = betterproto.message_field(1) + + +@dataclass +class ConfigSourcePathDeprecation(betterproto.Message): + """D003""" + + info: "EventInfo" = betterproto.message_field(1) + deprecated_path: str = betterproto.string_field(2) + exp_path: str = betterproto.string_field(3) + + +@dataclass +class ConfigDataPathDeprecation(betterproto.Message): + """D004""" + + info: "EventInfo" = betterproto.message_field(1) + deprecated_path: str = betterproto.string_field(2) + exp_path: str = betterproto.string_field(3) + + +@dataclass +class AdapterDeprecationWarning(betterproto.Message): + """D005""" + + info: "EventInfo" = betterproto.message_field(1) + old_name: str = 
betterproto.string_field(2) + new_name: str = betterproto.string_field(3) + + +@dataclass +class MetricAttributesRenamed(betterproto.Message): + """D006""" + + info: "EventInfo" = betterproto.message_field(1) + metric_name: str = betterproto.string_field(2) + + +@dataclass +class ExposureNameDeprecation(betterproto.Message): + """D007""" + + info: "EventInfo" = betterproto.message_field(1) + exposure: str = betterproto.string_field(2) + + @dataclass class AdapterEventDebug(betterproto.Message): """E001""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -296,9 +356,10 @@ class AdapterEventInfo(betterproto.Message): """E002""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -306,9 +367,10 @@ class AdapterEventWarning(betterproto.Message): """E003""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -316,10 +378,11 @@ class AdapterEventError(betterproto.Message): """E004""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) - exc_info: str = betterproto.string_field(5) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) + exc_info: str = betterproto.string_field(6) @dataclass @@ -327,8 +390,9 @@ class NewConnection(betterproto.Message): """E005""" info: "EventInfo" = betterproto.message_field(1) - conn_type: str = betterproto.string_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_type: str = betterproto.string_field(3) + conn_name: str = betterproto.string_field(4) @dataclass @@ -340,7 +404,7 @@ class ConnectionReused(betterproto.Message): @dataclass -class ConnectionLeftOpen(betterproto.Message): +class ConnectionLeftOpenInCleanup(betterproto.Message): """E007""" info: "EventInfo" = betterproto.message_field(1) @@ -348,7 +412,7 @@ class ConnectionLeftOpen(betterproto.Message): @dataclass -class ConnectionClosed(betterproto.Message): +class ConnectionClosedInCleanup(betterproto.Message): """E008""" info: "EventInfo" = betterproto.message_field(1) @@ -360,24 +424,27 @@ class RollbackFailed(betterproto.Message): """E009""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = 
betterproto.string_field(3) + exc_info: str = betterproto.string_field(4) @dataclass -class ConnectionClosed2(betterproto.Message): +class ConnectionClosed(betterproto.Message): """E010""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass -class ConnectionLeftOpen2(betterproto.Message): +class ConnectionLeftOpen(betterproto.Message): """E011""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -385,7 +452,8 @@ class Rollback(betterproto.Message): """E012""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -413,8 +481,9 @@ class ConnectionUsed(betterproto.Message): """E015""" info: "EventInfo" = betterproto.message_field(1) - conn_type: str = betterproto.string_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_type: str = betterproto.string_field(3) + conn_name: str = betterproto.string_field(4) @dataclass @@ -422,8 +491,9 @@ class SQLQuery(betterproto.Message): """E016""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - sql: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) + sql: str = betterproto.string_field(4) @dataclass @@ -431,8 +501,9 @@ class SQLQueryStatus(betterproto.Message): """E017""" info: "EventInfo" = betterproto.message_field(1) - status: str = betterproto.string_field(2) - elapsed: float = betterproto.float_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + status: str = betterproto.string_field(3) + elapsed: float = betterproto.float_field(4) @dataclass @@ -440,7 +511,8 @@ class SQLCommit(betterproto.Message): """E018""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -608,7 +680,8 @@ class NewConnectionOpening(betterproto.Message): """E037""" info: "EventInfo" = betterproto.message_field(1) - connection_state: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + connection_state: str = betterproto.string_field(3) @dataclass @@ -629,6 +702,14 @@ class CodeExecutionStatus(betterproto.Message): elapsed: float = betterproto.float_field(3) +@dataclass +class CatalogGenerationError(betterproto.Message): + """E040""" + + info: "EventInfo" = betterproto.message_field(1) + exc: str = betterproto.string_field(2) + + @dataclass class WriteCatalogFailure(betterproto.Message): """E041""" @@ -1066,19 +1147,122 @@ class PartialParsingDeletedExposure(betterproto.Message): @dataclass -class InvalidDisabledSourceInTestNode(betterproto.Message): +class InvalidDisabledTargetInTestNode(betterproto.Message): """I050""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + resource_type_title: str = betterproto.string_field(2) + unique_id: str = betterproto.string_field(3) + original_file_path: str = betterproto.string_field(4) + target_kind: str = 
betterproto.string_field(5) + target_name: str = betterproto.string_field(6) + target_package: str = betterproto.string_field(7) @dataclass -class InvalidRefInTestNode(betterproto.Message): +class UnusedResourceConfigPath(betterproto.Message): """I051""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + unused_config_paths: List[str] = betterproto.string_field(2) + + +@dataclass +class SeedIncreased(betterproto.Message): + """I052""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitSamePath(betterproto.Message): + """I053""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitAndPathChanged(betterproto.Message): + """I054""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitChecksumChanged(betterproto.Message): + """I055""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + checksum_name: str = betterproto.string_field(4) + + +@dataclass +class UnusedTables(betterproto.Message): + """I056""" + + info: "EventInfo" = betterproto.message_field(1) + unused_tables: List[str] = betterproto.string_field(2) + + +@dataclass +class WrongResourceSchemaFile(betterproto.Message): + """I057""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + resource_type: str = betterproto.string_field(3) + plural_resource_type: str = betterproto.string_field(4) + yaml_key: str = betterproto.string_field(5) + file_path: str = betterproto.string_field(6) + + +@dataclass +class NoNodeForYamlKey(betterproto.Message): + """I058""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + yaml_key: str = betterproto.string_field(3) + file_path: str = betterproto.string_field(4) + + +@dataclass +class MacroPatchNotFound(betterproto.Message): + """I059""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + + +@dataclass +class NodeNotFoundOrDisabled(betterproto.Message): + """I060""" + + info: "EventInfo" = betterproto.message_field(1) + original_file_path: str = betterproto.string_field(2) + unique_id: str = betterproto.string_field(3) + resource_type_title: str = betterproto.string_field(4) + target_name: str = betterproto.string_field(5) + target_kind: str = betterproto.string_field(6) + target_package: str = betterproto.string_field(7) + disabled: str = betterproto.string_field(8) + + +@dataclass +class JinjaLogWarning(betterproto.Message): + """I061""" + + info: "EventInfo" = betterproto.message_field(1) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1166,19 +1350,21 @@ class SelectorReportInvalidSelector(betterproto.Message): @dataclass -class MacroEventInfo(betterproto.Message): +class JinjaLogInfo(betterproto.Message): """M011""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass -class MacroEventDebug(betterproto.Message): +class 
JinjaLogDebug(betterproto.Message): """M012""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1309,6 +1495,23 @@ class DepsSetDownloadDirectory(betterproto.Message): path: str = betterproto.string_field(2) +@dataclass +class DepsUnpinned(betterproto.Message): + """M029""" + + info: "EventInfo" = betterproto.message_field(1) + revision: str = betterproto.string_field(2) + git: str = betterproto.string_field(3) + + +@dataclass +class NoNodesForSelectionCriteria(betterproto.Message): + """M030""" + + info: "EventInfo" = betterproto.message_field(1) + spec_raw: str = betterproto.string_field(2) + + @dataclass class RunningOperationCaughtError(betterproto.Message): """Q001""" @@ -1357,57 +1560,21 @@ class SQLRunnerException(betterproto.Message): @dataclass -class PrintErrorTestResult(betterproto.Message): +class LogTestResult(betterproto.Message): """Q007""" info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - - -@dataclass -class PrintPassTestResult(betterproto.Message): - """Q008""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - - -@dataclass -class PrintWarnTestResult(betterproto.Message): - """Q009""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - num_failures: int = betterproto.int32_field(7) - - -@dataclass -class PrintFailureTestResult(betterproto.Message): - """Q010""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - num_failures: int = betterproto.int32_field(7) + status: str = betterproto.string_field(4) + index: int = betterproto.int32_field(5) + num_models: int = betterproto.int32_field(6) + execution_time: float = betterproto.float_field(7) + num_failures: int = betterproto.int32_field(8) @dataclass -class PrintStartLine(betterproto.Message): +class LogStartLine(betterproto.Message): """Q011""" info: "EventInfo" = betterproto.message_field(1) @@ -1418,7 +1585,7 @@ class PrintStartLine(betterproto.Message): @dataclass -class PrintModelResultLine(betterproto.Message): +class LogModelResult(betterproto.Message): """Q012""" info: "EventInfo" = betterproto.message_field(1) @@ -1427,40 +1594,11 @@ class PrintModelResultLine(betterproto.Message): status: str = betterproto.string_field(4) index: int = betterproto.int32_field(5) total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - - -@dataclass -class PrintModelErrorResultLine(betterproto.Message): - """Q013""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = 
betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) + execution_time: float = betterproto.float_field(7) @dataclass -class PrintSnapshotErrorResultLine(betterproto.Message): - """Q014""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - cfg: Dict[str, str] = betterproto.map_field( - 8, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass -class PrintSnapshotResultLine(betterproto.Message): +class LogSnapshotResult(betterproto.Message): """Q015""" info: "EventInfo" = betterproto.message_field(1) @@ -1476,87 +1614,36 @@ @dataclass -class PrintSeedErrorResultLine(betterproto.Message): +class LogSeedResult(betterproto.Message): """Q016""" info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) status: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - schema: str = betterproto.string_field(7) - relation: str = betterproto.string_field(8) + result_message: str = betterproto.string_field(4) + index: int = betterproto.int32_field(5) + total: int = betterproto.int32_field(6) + execution_time: float = betterproto.float_field(7) + schema: str = betterproto.string_field(8) + relation: str = betterproto.string_field(9) @dataclass -class PrintSeedResultLine(betterproto.Message): - """Q017""" +class LogFreshnessResult(betterproto.Message): + """Q018""" info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - status: str = betterproto.string_field(3) + status: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(3) index: int = betterproto.int32_field(4) total: int = betterproto.int32_field(5) execution_time: float = betterproto.float_field(6) - schema: str = betterproto.string_field(7) - relation: str = betterproto.string_field(8) - - -@dataclass -class PrintFreshnessErrorLine(betterproto.Message): - """Q018""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) + source_name: str = betterproto.string_field(7) + table_name: str = betterproto.string_field(8) @dataclass -class PrintFreshnessErrorStaleLine(betterproto.Message): - """Q019""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessWarnLine(betterproto.Message): - """Q020""" - - info: "EventInfo" = betterproto.message_field(1) - node_info:
"NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessPassLine(betterproto.Message): - """Q021""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintCancelLine(betterproto.Message): +class LogCancelLine(betterproto.Message): """Q022""" info: "EventInfo" = betterproto.message_field(1) @@ -1577,7 +1664,6 @@ class NodeStart(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1586,7 +1672,6 @@ class NodeFinished(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) run_result: "RunResultMsg" = betterproto.message_field(4) @@ -1605,14 +1690,7 @@ class ConcurrencyLine(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) num_threads: int = betterproto.int32_field(2) target_name: str = betterproto.string_field(3) - - -@dataclass -class CompilingNode(betterproto.Message): - """Q028""" - - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) + node_count: int = betterproto.int32_field(4) @dataclass @@ -1620,7 +1698,7 @@ class WritingInjectedSQLForNode(betterproto.Message): """Q029""" info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) @dataclass @@ -1629,7 +1707,6 @@ class NodeCompiling(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1638,11 +1715,10 @@ class NodeExecuting(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass -class PrintHookStartLine(betterproto.Message): +class LogHookStartLine(betterproto.Message): """Q032""" info: "EventInfo" = betterproto.message_field(1) @@ -1653,7 +1729,7 @@ class PrintHookStartLine(betterproto.Message): @dataclass -class PrintHookEndLine(betterproto.Message): +class LogHookEndLine(betterproto.Message): """Q033""" info: "EventInfo" = betterproto.message_field(1) @@ -1678,6 +1754,13 @@ class SkippingDetails(betterproto.Message): total: int = betterproto.int32_field(7) +@dataclass +class NothingToDo(betterproto.Message): + """Q035""" + + info: "EventInfo" = betterproto.message_field(1) + + @dataclass class RunningOperationUncaughtError(betterproto.Message): """Q036""" @@ -1697,13 +1780,21 @@ class EndRunResult(betterproto.Message): success: bool = betterproto.bool_field(5) +@dataclass +class NoNodesSelected(betterproto.Message): + """Q038""" + + info: "EventInfo" = betterproto.message_field(1) + + @dataclass class CatchableExceptionOnRun(betterproto.Message): """W002""" info: "EventInfo" = betterproto.message_field(1) - exc: str = 
betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + exc: str = betterproto.string_field(3) + exc_info: str = betterproto.string_field(4) @dataclass @@ -1821,10 +1912,12 @@ class TimingInfoCollected(betterproto.Message): """Z010""" info: "EventInfo" = betterproto.message_field(1) + node_info: "NodeInfo" = betterproto.message_field(2) + timing_info: "TimingInfoMsg" = betterproto.message_field(3) @dataclass -class PrintDebugStackTrace(betterproto.Message): +class LogDebugStackTrace(betterproto.Message): """Z011""" info: "EventInfo" = betterproto.message_field(1) @@ -1991,7 +2084,7 @@ class EndOfRunSummary(betterproto.Message): @dataclass -class PrintSkipBecauseError(betterproto.Message): +class LogSkipBecauseError(betterproto.Message): """Z034""" info: "EventInfo" = betterproto.message_field(1) @@ -2066,34 +2159,9 @@ class TrackingInitializeFailure(betterproto.Message): exc_info: str = betterproto.string_field(2) -@dataclass -class GeneralWarningMsg(betterproto.Message): - """Z046""" - - info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) - log_fmt: str = betterproto.string_field(3) - - -@dataclass -class GeneralWarningException(betterproto.Message): - """Z047""" - - info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) - log_fmt: str = betterproto.string_field(3) - - -@dataclass -class EventBufferFull(betterproto.Message): - """Z048""" - - info: "EventInfo" = betterproto.message_field(1) - - @dataclass class RunResultWarningMessage(betterproto.Message): - """Z049""" + """Z046""" info: "EventInfo" = betterproto.message_field(1) msg: str = betterproto.string_field(2) diff --git a/core/dbt/events/test_types.py b/core/dbt/events/test_types.py index 5f4a10cd7d7..cf7307125ca 100644 --- a/core/dbt/events/test_types.py +++ b/core/dbt/events/test_types.py @@ -61,18 +61,3 @@ def code(self): def message(self) -> str: return f"Unit Test: {self.msg}" - - -# since mypy doesn't run on every file we need to suggest to mypy that every -# class gets instantiated. But we don't actually want to run this code. -# making the conditional `if False` causes mypy to skip it as dead code so -# we need to skirt around that by computing something it doesn't check statically. -# -# TODO remove these lines once we run mypy everywhere. 
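The secret-scrubbing helpers added earlier in this patch (core/dbt/events/helpers.py) are easy to exercise in isolation. A small sketch; the DBT_ENV_SECRET_ prefix matches dbt's SECRET_ENV_PREFIX constant, while the variable name and message here are made up:

import os
from typing import List

SECRET_ENV_PREFIX = "DBT_ENV_SECRET_"  # mirrors dbt.constants.SECRET_ENV_PREFIX


def env_secrets() -> List[str]:
    # collect the values of all secret-prefixed environment variables
    return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()]


def scrub_secrets(msg: str, secrets: List[str]) -> str:
    # mask every known secret value before the message is logged
    scrubbed = msg
    for secret in secrets:
        scrubbed = scrubbed.replace(secret, "*****")
    return scrubbed


os.environ[SECRET_ENV_PREFIX + "TOKEN"] = "hunter2"  # hypothetical secret
print(scrub_secrets("auth failed for token hunter2", env_secrets()))
# -> auth failed for token *****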
-if 1 == 0: - IntegrationTestInfo(msg="") - IntegrationTestDebug(msg="") - IntegrationTestWarn(msg="") - IntegrationTestError(msg="") - IntegrationTestException(msg="") - UnitTestInfo(msg="") diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index eaa05b4f93d..1c330106d92 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -15,6 +15,7 @@ message EventInfo { string thread = 7; google.protobuf.Timestamp ts = 8; map extra = 9; + string category = 10; } // TimingInfo @@ -38,7 +39,6 @@ message NodeInfo { // RunResult message RunResultMsg { - // status: Union[RunStatus, TestStatus, FreshnessStatus] string status = 1; string message = 2; repeated TimingInfoMsg timing_info = 3; @@ -213,46 +213,98 @@ message ProjectCreated { string slack_url = 4; } +// D - Deprecation + +// D001 +message PackageRedirectDeprecation { + EventInfo info = 1; + string old_name = 2; + string new_name = 3; +} + +// D002 +message PackageInstallPathDeprecation { + EventInfo info = 1; +} + +// D003 +message ConfigSourcePathDeprecation { + EventInfo info = 1; + string deprecated_path = 2; + string exp_path = 3; +} + +// D004 +message ConfigDataPathDeprecation { + EventInfo info = 1; + string deprecated_path = 2; + string exp_path = 3; +} + +//D005 +message AdapterDeprecationWarning { + EventInfo info = 1; + string old_name = 2; + string new_name = 3; +} + +//D006 +message MetricAttributesRenamed { + EventInfo info = 1; + string metric_name = 2; +} + +//D007 +message ExposureNameDeprecation { + EventInfo info = 1; + string exposure = 2; +} + // E - DB Adapter // E001 message AdapterEventDebug { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E002 message AdapterEventInfo { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E003 message AdapterEventWarning { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E004 message AdapterEventError { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; - string exc_info = 5; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; + string exc_info = 6; } // E005 message NewConnection { EventInfo info = 1; - string conn_type = 2; - string conn_name = 3; + NodeInfo node_info = 2; + string conn_type = 3; + string conn_name = 4; } // E006 @@ -262,13 +314,13 @@ message ConnectionReused { } // E007 -message ConnectionLeftOpen { +message ConnectionLeftOpenInCleanup { EventInfo info = 1; string conn_name = 2; } // E008 -message ConnectionClosed { +message ConnectionClosedInCleanup { EventInfo info = 1; string conn_name = 2; } @@ -276,26 +328,30 @@ message ConnectionClosed { // E009 message RollbackFailed { EventInfo info = 1; - string conn_name = 2; - string exc_info = 3; + NodeInfo node_info = 2; + string conn_name = 3; + string exc_info = 4; } // E010 -message ConnectionClosed2 { +message ConnectionClosed { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E011 -message ConnectionLeftOpen2 { +message ConnectionLeftOpen { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name 
= 3; } // E012 message Rollback { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E013 @@ -317,28 +373,32 @@ message ListRelations { // E015 message ConnectionUsed { EventInfo info = 1; - string conn_type = 2; - string conn_name = 3; + NodeInfo node_info = 2; + string conn_type = 3; + string conn_name = 4; } // E016 message SQLQuery { EventInfo info = 1; - string conn_name = 2; - string sql = 3; + NodeInfo node_info = 2; + string conn_name = 3; + string sql = 4; } // E017 message SQLQueryStatus { EventInfo info = 1; - string status = 2; - float elapsed = 3; + NodeInfo node_info = 2; + string status = 3; + float elapsed = 4; } // E018 message SQLCommit { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E019 @@ -455,13 +515,13 @@ message AdapterImportError { message PluginLoadError { EventInfo info = 1; string exc_info = 2; - } // E037 message NewConnectionOpening { EventInfo info = 1; - string connection_state = 2; + NodeInfo node_info = 2; + string connection_state = 3; } // E038 @@ -478,7 +538,11 @@ message CodeExecutionStatus { float elapsed = 3; } -// Skipped E040 +// E040 +message CatalogGenerationError { + EventInfo info = 1; + string exc = 2; +} // E041 message WriteCatalogFailure { @@ -806,17 +870,99 @@ message PartialParsingDeletedExposure { } // I050 -message InvalidDisabledSourceInTestNode { +message InvalidDisabledTargetInTestNode { EventInfo info = 1; - string msg = 2; + string resource_type_title = 2; + string unique_id = 3; + string original_file_path = 4; + string target_kind = 5; + string target_name = 6; + string target_package = 7; } // I051 -message InvalidRefInTestNode { +message UnusedResourceConfigPath { EventInfo info = 1; - string msg = 2; + repeated string unused_config_paths = 2; +} + +// I052 +message SeedIncreased { + EventInfo info = 1; + string package_name = 2; + string name = 3; +} + +// I053 +message SeedExceedsLimitSamePath { + EventInfo info = 1; + string package_name = 2; + string name = 3; } +// I054 +message SeedExceedsLimitAndPathChanged { + EventInfo info = 1; + string package_name = 2; + string name = 3; +} + +// I055 +message SeedExceedsLimitChecksumChanged { + EventInfo info = 1; + string package_name = 2; + string name = 3; + string checksum_name = 4; +} + +// I056 +message UnusedTables { + EventInfo info = 1; + repeated string unused_tables = 2; +} + +// I057 +message WrongResourceSchemaFile { + EventInfo info = 1; + string patch_name = 2; + string resource_type = 3; + string plural_resource_type = 4; + string yaml_key = 5; + string file_path = 6; +} + +// I058 +message NoNodeForYamlKey { + EventInfo info = 1; + string patch_name = 2; + string yaml_key = 3; + string file_path = 4; +} + +// I059 +message MacroPatchNotFound { + EventInfo info = 1; + string patch_name = 2; +} + +// I060 +message NodeNotFoundOrDisabled { + EventInfo info = 1; + string original_file_path = 2; + string unique_id = 3; + string resource_type_title = 4; + string target_name = 5; + string target_kind = 6; + string target_package = 7; + string disabled = 8; +} + +// I061 +message JinjaLogWarning { + EventInfo info = 1; + NodeInfo node_info = 2; + string msg = 3; +} // M - Deps generation @@ -885,15 +1031,17 @@ message SelectorReportInvalidSelector { } // M011 -message MacroEventInfo { +message JinjaLogInfo { EventInfo info = 1; - string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M012 -message MacroEventDebug { +message JinjaLogDebug { EventInfo info = 1; - 
string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M013 @@ -992,6 +1140,19 @@ message DepsSetDownloadDirectory { string path = 2; } +// M029 +message DepsUnpinned { + EventInfo info = 1; + string revision = 2; + string git = 3; +} + +// M030 +message NoNodesForSelectionCriteria { + EventInfo info = 1; + string spec_raw = 2; +} + // Q - Node execution // Q001 @@ -1030,49 +1191,23 @@ message SQLRunnerException { } // Q007 -message PrintErrorTestResult { +message LogTestResult { EventInfo info = 1; NodeInfo node_info = 2; string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; + string status = 4; + int32 index = 5; + int32 num_models = 6; + float execution_time = 7; + int32 num_failures = 8; } -// Q008 -message PrintPassTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; -} -// Q009 -message PrintWarnTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; - int32 num_failures = 7; -} +// Skipped Q008, Q009, Q010 -// Q010 -message PrintFailureTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; - int32 num_failures = 7; -} // Q011 -message PrintStartLine { +message LogStartLine { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; @@ -1081,29 +1216,20 @@ } // Q012 -message PrintModelResultLine { +message LogModelResult { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; string status = 4; int32 index = 5; int32 total = 6; float execution_time = 7; } -// Q013 -message PrintModelErrorResultLine { - EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; -} +// Skipped Q013, Q014 -// Q014 -message PrintSnapshotErrorResultLine { +// Q015 +message LogSnapshotResult { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; @@ -1114,88 +1240,39 @@ map cfg = 8; } -// Q015 -message PrintSnapshotResultLine { +// Q016 +message LogSeedResult { EventInfo info = 1; NodeInfo node_info = 2; - string description = 3; - string status = 4; + string status = 3; + string result_message = 4; int32 index = 5; int32 total = 6; float execution_time = 7; - map cfg = 8; + string schema = 8; + string relation = 9; } -// Q016 -message PrintSeedErrorResultLine { - EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; - int32 index = 4; - int32 total = 5; - float execution_time = 6; - string schema = 7; - string relation = 8; -} +// Skipped Q017 -// Q017 -message PrintSeedResultLine { +// Q018 +message LogFreshnessResult { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; + string status = 2; + NodeInfo node_info = 3; int32 index = 4; int32 total = 5; float execution_time = 6; - string schema = 7; - string relation = 8; + string source_name = 7; + string table_name = 8; } -// Q018 -message PrintFreshnessErrorLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} -// Q019 -message PrintFreshnessErrorStaleLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float
execution_time = 5; - string source_name = 6; - string table_name = 7; -} - -// Q020 -message PrintFreshnessWarnLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} +// Skipped Q019, Q020, Q021 -// Q021 -message PrintFreshnessPassLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} // Q022 -message PrintCancelLine { +message LogCancelLine { EventInfo info = 1; string conn_name = 2; } @@ -1210,14 +1287,12 @@ message DefaultSelector { message NodeStart { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q025 message NodeFinished { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; RunResultMsg run_result = 4; } @@ -1232,36 +1307,31 @@ message ConcurrencyLine { EventInfo info = 1; int32 num_threads = 2; string target_name = 3; + int32 node_count = 4; } -// Q028 -message CompilingNode { - EventInfo info = 1; - string unique_id = 2; -} +// Skipped Q028 // Q029 message WritingInjectedSQLForNode { EventInfo info = 1; - string unique_id = 2; + NodeInfo node_info = 2; } // Q030 message NodeCompiling { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q031 message NodeExecuting { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q032 -message PrintHookStartLine { +message LogHookStartLine { EventInfo info = 1; NodeInfo node_info = 2; string statement = 3; @@ -1270,7 +1340,7 @@ message PrintHookStartLine { } // Q033 -message PrintHookEndLine { +message LogHookEndLine { EventInfo info = 1; NodeInfo node_info = 2; string statement = 3; @@ -1291,7 +1361,10 @@ message SkippingDetails { int32 total = 7; } -// Skipped Q035 +// Q035 +message NothingToDo { + EventInfo info = 1; +} // Q036 message RunningOperationUncaughtError { @@ -1308,6 +1381,11 @@ message EndRunResult { bool success = 5; } +// Q038 +message NoNodesSelected { + EventInfo info = 1; +} + // W - Node testing // Skipped W001 @@ -1315,8 +1393,9 @@ message EndRunResult { // W002 message CatchableExceptionOnRun { EventInfo info = 1; - string exc = 2; - string exc_info = 3; + NodeInfo node_info = 2; + string exc = 3; + string exc_info = 4; } // W003 @@ -1408,10 +1487,12 @@ message SystemReportReturnCode { // Z010 message TimingInfoCollected { EventInfo info = 1; + NodeInfo node_info = 2; + TimingInfoMsg timing_info = 3; } // Z011 -message PrintDebugStackTrace { +message LogDebugStackTrace { EventInfo info = 1; string exc_info = 2; } @@ -1538,7 +1619,7 @@ message EndOfRunSummary { // Skipped Z031, Z032, Z033 // Z034 -message PrintSkipBecauseError { +message LogSkipBecauseError { EventInfo info = 1; string schema = 2; string relation = 3; @@ -1596,25 +1677,6 @@ message TrackingInitializeFailure { // Skipped Z045 // Z046 -message GeneralWarningMsg { - EventInfo info = 1; - string msg = 2; - string log_fmt = 3; -} - -// Z047 -message GeneralWarningException { - EventInfo info = 1; - string exc = 2; - string log_fmt = 3; -} - -// Z048 -message EventBufferFull { - EventInfo info = 1; -} - -// Z049 message RunResultWarningMessage { EventInfo info = 1; string msg = 2; diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index f6e66f941d2..0a0cd04fe1d 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -1,6 +1,8 @@ from dataclasses import dataclass -from dbt import ui +from dbt.ui import 
line_wrap_message, warning_tag, red, green, yellow +from dbt.constants import MAXIMUM_SEED_SIZE_NAME, PIN_PACKAGE_URL from dbt.events.base_types import ( + DynamicLevel, NoFile, DebugLevel, InfoLevel, @@ -12,9 +14,9 @@ ) from dbt.events.format import format_fancy_output_line, pluralize -# The generated classes quote the included message classes, requiring the following line +# The generated classes quote the included message classes, requiring the following lines from dbt.events.proto_types import EventInfo, RunResultMsg, ListOfStrings # noqa -from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg +from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg, TimingInfoMsg # noqa from dbt.events import proto_types as pt from dbt.node_types import NodeType @@ -32,10 +34,11 @@ # | Code | Description | # |:----:|:-------------------:| # | A | Pre-project loading | +# | D | Deprecations | # | E | DB adapter | # | I | Project parsing | # | M | Deps generation | -# | Q | Node execution | +# | Q | Node execution | # | W | Node testing | # | Z | Misc | # | T | Test only | @@ -305,6 +308,114 @@ def message(self) -> str: """ +# ======================================================= +# D - Deprecations +# ======================================================= + + +@dataclass +class PackageRedirectDeprecation(WarnLevel, pt.PackageRedirectDeprecation): # noqa + def code(self): + return "D001" + + def message(self): + description = ( + f"The `{self.old_name}` package is deprecated in favor of `{self.new_name}`. Please " + f"update your `packages.yml` configuration to use `{self.new_name}` instead." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class PackageInstallPathDeprecation(WarnLevel, pt.PackageInstallPathDeprecation): # noqa + def code(self): + return "D002" + + def message(self): + description = """\ + The default package install path has changed from `dbt_modules` to `dbt_packages`. + Please update `clean-targets` in `dbt_project.yml` and check `.gitignore` as well. + Or, set `packages-install-path: dbt_modules` if you'd like to keep the current value. + """ + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class ConfigSourcePathDeprecation(WarnLevel, pt.ConfigSourcePathDeprecation): # noqa + def code(self): + return "D003" + + def message(self): + description = ( + f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`." + "Please update your `dbt_project.yml` configuration to reflect this change." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class ConfigDataPathDeprecation(WarnLevel, pt.ConfigDataPathDeprecation): # noqa + def code(self): + return "D004" + + def message(self): + description = ( + f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`." + "Please update your `dbt_project.yml` configuration to reflect this change." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class AdapterDeprecationWarning(WarnLevel, pt.AdapterDeprecationWarning): # noqa + def code(self): + return "D005" + + def message(self): + description = ( + f"The adapter function `adapter.{self.old_name}` is deprecated and will be removed in " + f"a future release of dbt. Please use `adapter.{self.new_name}` instead. 
" + f"\n\nDocumentation for {self.new_name} can be found here:" + f"\n\nhttps://docs.getdbt.com/docs/adapter" + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class MetricAttributesRenamed(WarnLevel, pt.MetricAttributesRenamed): # noqa + def code(self): + return "D006" + + def message(self): + description = ( + "dbt-core v1.3 renamed attributes for metrics:" + "\n 'sql' -> 'expression'" + "\n 'type' -> 'calculation_method'" + "\n 'type: expression' -> 'calculation_method: derived'" + "\nThe old metric parameter names will be fully deprecated in v1.4." + f"\nPlease remove them from the metric definition of metric '{self.metric_name}'" + "\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849" + ) + + return warning_tag(f"Deprecated functionality\n\n{description}") + + +@dataclass +class ExposureNameDeprecation(WarnLevel, pt.ExposureNameDeprecation): # noqa + def code(self): + return "D007" + + def message(self): + description = ( + "Starting in v1.3, the 'name' of an exposure should contain only letters, " + "numbers, and underscores. Exposures support a new property, 'label', which may " + f"contain spaces, capital letters, and special characters. {self.exposure} does not " + "follow this pattern. Please update the 'name', and use the 'label' property for a " + "human-friendly title. This will raise an error in a future version of dbt-core." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + # ======================================================= # E - DB Adapter # ======================================================= @@ -365,7 +476,7 @@ def message(self) -> str: @dataclass -class ConnectionLeftOpen(DebugLevel, pt.ConnectionLeftOpen): +class ConnectionLeftOpenInCleanup(DebugLevel, pt.ConnectionLeftOpenInCleanup): def code(self): return "E007" @@ -374,7 +485,7 @@ def message(self) -> str: @dataclass -class ConnectionClosed(DebugLevel, pt.ConnectionClosed): +class ConnectionClosedInCleanup(DebugLevel, pt.ConnectionClosedInCleanup): def code(self): return "E008" @@ -393,7 +504,7 @@ def message(self) -> str: # TODO: can we combine this with ConnectionClosed? @dataclass -class ConnectionClosed2(DebugLevel, pt.ConnectionClosed2): +class ConnectionClosed(DebugLevel, pt.ConnectionClosed): def code(self): return "E010" @@ -403,7 +514,7 @@ def message(self) -> str: # TODO: can we combine this with ConnectionLeftOpen? 
@dataclass -class ConnectionLeftOpen2(DebugLevel, pt.ConnectionLeftOpen2): +class ConnectionLeftOpen(DebugLevel, pt.ConnectionLeftOpen): def code(self): return "E011" @@ -675,7 +786,13 @@ def message(self) -> str: return f"Execution status: {self.status} in {self.elapsed} seconds" -# Skipped E040 +@dataclass +class CatalogGenerationError(WarnLevel, pt.CatalogGenerationError): + def code(self): + return "E040" + + def message(self) -> str: + return f"Encountered an error while generating catalog: {self.exc}" @dataclass @@ -1218,23 +1335,194 @@ def message(self) -> str: return f"Partial parsing: deleted exposure {self.unique_id}" -# TODO: switch to storing structured info and calling get_target_failure_msg @dataclass -class InvalidDisabledSourceInTestNode( - WarnLevel, EventStringFunctor, pt.InvalidDisabledSourceInTestNode -): +class InvalidDisabledTargetInTestNode(WarnLevel, pt.InvalidDisabledTargetInTestNode): def code(self): return "I050" def message(self) -> str: - return ui.warning_tag(self.msg) + + target_package_string = "" + if self.target_package != target_package_string: + target_package_string = "in package '{}' ".format(self.target_package) + + msg = "{} '{}' ({}) depends on a {} named '{}' {}which is disabled".format( + self.resource_type_title, + self.unique_id, + self.original_file_path, + self.target_kind, + self.target_name, + target_package_string, + ) + + return warning_tag(msg) @dataclass -class InvalidRefInTestNode(DebugLevel, EventStringFunctor, pt.InvalidRefInTestNode): +class UnusedResourceConfigPath(WarnLevel, pt.UnusedResourceConfigPath): def code(self): return "I051" + def message(self) -> str: + path_list = "\n".join(f"- {u}" for u in self.unused_config_paths) + msg = ( + "Configuration paths exist in your dbt_project.yml file which do not " + "apply to any resources.\n" + f"There are {len(self.unused_config_paths)} unused configuration paths:\n{path_list}" + ) + return warning_tag(msg) + + +@dataclass +class SeedIncreased(WarnLevel, pt.SeedIncreased): + def code(self): + return "I052" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was " + f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed" + ) + return msg + + +@dataclass +class SeedExceedsLimitSamePath(WarnLevel, pt.SeedExceedsLimitSamePath): + def code(self): + return "I053" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt " + f"cannot tell if it has changed: assuming they are the same" + ) + return msg + + +@dataclass +class SeedExceedsLimitAndPathChanged(WarnLevel, pt.SeedExceedsLimitAndPathChanged): + def code(self): + return "I054" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in " + f"a different location, assuming it has changed" + ) + return msg + + +@dataclass +class SeedExceedsLimitChecksumChanged(WarnLevel, pt.SeedExceedsLimitChecksumChanged): + def code(self): + return "I055" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. 
The previous file had a " f"checksum type of {self.checksum_name}, so it has changed" ) return msg + + +@dataclass +class UnusedTables(WarnLevel, pt.UnusedTables): + def code(self): + return "I056" + + def message(self) -> str: + msg = [ + "During parsing, dbt encountered source overrides that had no target:", + ] + msg += self.unused_tables + msg.append("") + return warning_tag("\n".join(msg)) + + +@dataclass +class WrongResourceSchemaFile(WarnLevel, pt.WrongResourceSchemaFile): + def code(self): + return "I057" + + def message(self) -> str: + msg = line_wrap_message( + f"""\ + '{self.patch_name}' is a {self.resource_type} node, but it is + specified in the {self.yaml_key} section of + {self.file_path}. + To fix this error, place the `{self.patch_name}` + specification under the {self.plural_resource_type} key instead. + """ + ) + return warning_tag(msg) + + +@dataclass +class NoNodeForYamlKey(WarnLevel, pt.NoNodeForYamlKey): + def code(self): + return "I058" + + def message(self) -> str: + msg = ( + f"Did not find matching node for patch with name '{self.patch_name}' " + f"in the '{self.yaml_key}' section of " + f"file '{self.file_path}'" + ) + return warning_tag(msg) + + +@dataclass +class MacroPatchNotFound(WarnLevel, pt.MacroPatchNotFound): + def code(self): + return "I059" + + def message(self) -> str: + msg = f'Found patch for macro "{self.patch_name}" which was not found' + return warning_tag(msg) + + +@dataclass +class NodeNotFoundOrDisabled(WarnLevel, pt.NodeNotFoundOrDisabled): + def code(self): + return "I060" + + def message(self) -> str: + # this is duplicated logic from exceptions.get_not_found_or_disabled_msg + # when we convert exceptions to be structured maybe it can be combined? + # converting the bool to a string since None is also valid + if self.disabled == "None": + reason = "was not found or is disabled" + elif self.disabled == "True": + reason = "is disabled" + else: + reason = "was not found" + + target_package_string = "" + if self.target_package is not None: + target_package_string = "in package '{}' ".format(self.target_package) + + msg = "{} '{}' ({}) depends on a {} named '{}' {}which {}".format( + self.resource_type_title, + self.unique_id, + self.original_file_path, + self.target_kind, + self.target_name, + target_package_string, + reason, + ) + + return warning_tag(msg) + + +@dataclass +class JinjaLogWarning(WarnLevel, pt.JinjaLogWarning): + def code(self): + return "I061" + def message(self) -> str: return self.msg @@ -1338,20 +1626,22 @@ def message(self) -> str: @dataclass -class MacroEventInfo(InfoLevel, EventStringFunctor, pt.MacroEventInfo): +class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo): def code(self): return "M011" def message(self) -> str: + # This is for the log method used in macros so msg cannot be built here return self.msg @dataclass -class MacroEventDebug(DebugLevel, EventStringFunctor, pt.MacroEventDebug): +class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug): def code(self): return "M012" def message(self) -> str: + # This is for the log method used in macros so msg cannot be built here return self.msg @dataclass @@ -1417,7 +1707,7 @@ def code(self): def message(self) -> str: return "Updates available for packages: {} \ \nUpdate your versions in packages.yml, then run dbt deps".format( - self.packages + self.packages.value ) @@ -1505,6 +1795,35 @@ def message(self) -> str: return f"Set downloads directory='{self.path}'" +@dataclass +class DepsUnpinned(WarnLevel, pt.DepsUnpinned): + def code(self): + return
"M029" + + def message(self) -> str: + if self.revision == "HEAD": + unpinned_msg = "not pinned, using HEAD (default branch)" + elif self.revision in ("main", "master"): + unpinned_msg = f'pinned to the "{self.revision}" branch' + else: + unpinned_msg = None + + msg = ( + f'The git package "{self.git}" \n\tis {unpinned_msg}.\n\tThis can introduce ' + f"breaking changes into your project without warning!\n\nSee {PIN_PACKAGE_URL}" + ) + return yellow(f"WARNING: {msg}") + + +@dataclass +class NoNodesForSelectionCriteria(WarnLevel, pt.NoNodesForSelectionCriteria): + def code(self): + return "M030" + + def message(self) -> str: + return f"The selection criterion '{self.spec_raw}' does not match any nodes" + + # ======================================================= # Q - Node execution # ======================================================= @@ -1565,76 +1884,54 @@ def message(self) -> str: @dataclass -@dataclass -class PrintErrorTestResult(ErrorLevel, pt.PrintErrorTestResult): +class LogTestResult(DynamicLevel, pt.LogTestResult): def code(self): return "Q007" def message(self) -> str: - info = "ERROR" - msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintPassTestResult(InfoLevel, pt.PrintPassTestResult): - def code(self): - return "Q008" - - def message(self) -> str: - info = "PASS" + if self.status == "error": + info = "ERROR" + status = red(info) + elif self.status == "pass": + info = "PASS" + status = green(info) + elif self.status == "warn": + info = f"WARN {self.num_failures}" + status = yellow(info) + else: # self.status == "fail": + info = f"FAIL {self.num_failures}" + status = red(info) msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=ui.green(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintWarnTestResult(WarnLevel, pt.PrintWarnTestResult): - def code(self): - return "Q009" - def message(self) -> str: - info = f"WARN {self.num_failures}" - msg = f"{info} {self.name}" return format_fancy_output_line( msg=msg, - status=ui.yellow(info), + status=status, index=self.index, total=self.num_models, execution_time=self.execution_time, ) + @classmethod + def status_to_level(cls, status): + # The statuses come from TestStatus + # TODO should this return EventLevel enum instead? 
+ level_lookup = { + "fail": "error", + "pass": "info", + "warn": "warn", + "error": "error", + } + if status in level_lookup: + return level_lookup[status] + else: + return "info" -@dataclass -class PrintFailureTestResult(ErrorLevel, pt.PrintFailureTestResult): - def code(self): - return "Q010" - def message(self) -> str: - info = f"FAIL {self.num_failures}" - msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) +# Skipped Q008, Q009, Q010 @dataclass -class PrintStartLine(InfoLevel, pt.PrintStartLine): # noqa +class LogStartLine(InfoLevel, pt.LogStartLine): # noqa def code(self): return "Q011" @@ -1644,67 +1941,48 @@ def message(self) -> str: @dataclass -class PrintModelResultLine(InfoLevel, pt.PrintModelResultLine): +class LogModelResult(DynamicLevel, pt.LogModelResult): def code(self): return "Q012" def message(self) -> str: - info = "OK created" - msg = f"{info} {self.description}" - return format_fancy_output_line( - msg=msg, - status=ui.green(self.status), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintModelErrorResultLine(ErrorLevel, pt.PrintModelErrorResultLine): - def code(self): - return "Q013" + if self.status == "error": + info = "ERROR creating" + status = red(self.status.upper()) + else: + info = "OK created" + status = green(self.status) - def message(self) -> str: - info = "ERROR creating" msg = f"{info} {self.description}" return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) -@dataclass -class PrintSnapshotErrorResultLine(ErrorLevel, pt.PrintSnapshotErrorResultLine): - def code(self): - return "Q014" - - def message(self) -> str: - info = "ERROR snapshotting" - msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) - return format_fancy_output_line( - msg=msg, - status=ui.red(self.status.upper()), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q013, Q014 @dataclass -class PrintSnapshotResultLine(InfoLevel, pt.PrintSnapshotResultLine): +class LogSnapshotResult(DynamicLevel, pt.LogSnapshotResult): def code(self): return "Q015" def message(self) -> str: - info = "OK snapshotted" + if self.status == "error": + info = "ERROR snapshotting" + status = red(self.status.upper()) + else: + info = "OK snapshotted" + status = green(self.status) + msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, @@ -1712,115 +1990,84 @@ def message(self) -> str: @dataclass -class PrintSeedErrorResultLine(ErrorLevel, pt.PrintSeedErrorResultLine): +class LogSeedResult(DynamicLevel, pt.LogSeedResult): def code(self): return "Q016" def message(self) -> str: - info = "ERROR loading" + if self.status == "error": + info = "ERROR loading" + status = red(self.status.upper()) + else: + info = "OK loaded" + status = green(self.result_message) msg = f"{info} seed file {self.schema}.{self.relation}" return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) -@dataclass -class PrintSeedResultLine(InfoLevel, 
pt.PrintSeedResultLine): - def code(self): - return "Q017" - - def message(self) -> str: - info = "OK loaded" - msg = f"{info} seed file {self.schema}.{self.relation}" - return format_fancy_output_line( - msg=msg, - status=ui.green(self.status), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q017 @dataclass -class PrintFreshnessErrorLine(ErrorLevel, pt.PrintFreshnessErrorLine): +class LogFreshnessResult(DynamicLevel, pt.LogFreshnessResult): def code(self): return "Q018" def message(self) -> str: - info = "ERROR" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintFreshnessErrorStaleLine(ErrorLevel, pt.PrintFreshnessErrorStaleLine): - def code(self): - return "Q019" - - def message(self) -> str: - info = "ERROR STALE" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintFreshnessWarnLine(WarnLevel, pt.PrintFreshnessWarnLine): - def code(self): - return "Q020" - - def message(self) -> str: - info = "WARN" + if self.status == "runtime error": + info = "ERROR" + status = red(info) + elif self.status == "error": + info = "ERROR STALE" + status = red(info) + elif self.status == "warn": + info = "WARN" + status = yellow(info) + else: + info = "PASS" + status = green(info) msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=ui.yellow(info), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) + @classmethod + def status_to_level(cls, status): + # The statuses come from FreshnessStatus + # TODO should this return EventLevel enum instead? + level_lookup = { + "runtime error": "error", + "pass": "info", + "warn": "warn", + "error": "error", + } + if status in level_lookup: + return level_lookup[status] + else: + return "info" -@dataclass -class PrintFreshnessPassLine(InfoLevel, pt.PrintFreshnessPassLine): - def code(self): - return "Q021" - def message(self) -> str: - info = "PASS" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=ui.green(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q019, Q020, Q021 @dataclass -class PrintCancelLine(ErrorLevel, pt.PrintCancelLine): +class LogCancelLine(ErrorLevel, pt.LogCancelLine): def code(self): return "Q022" def message(self) -> str: msg = "CANCEL query {}".format(self.conn_name) - return format_fancy_output_line(msg=msg, status=ui.red("CANCEL"), index=None, total=None) + return format_fancy_output_line(msg=msg, status=red("CANCEL"), index=None, total=None) @dataclass @@ -1838,7 +2085,7 @@ def code(self): return "Q024" def message(self) -> str: - return f"Began running node {self.unique_id}" + return f"Began running node {self.node_info.unique_id}" @dataclass @@ -1847,7 +2094,7 @@ def code(self): return "Q025" def message(self) -> str: - return f"Finished running node {self.unique_id}" + return f"Finished running node {self.node_info.unique_id}" @dataclass @@ -1861,7 +2108,7 @@ def message(self) -> str: "cancellation. Some queries may still be " "running!" 
) - return ui.yellow(msg) + return yellow(msg) @dataclass @@ -1873,13 +2120,7 @@ def message(self) -> str: return f"Concurrency: {self.num_threads} threads (target='{self.target_name}')" -@dataclass -class CompilingNode(DebugLevel, pt.CompilingNode): - def code(self): - return "Q028" - - def message(self) -> str: - return f"Compiling {self.unique_id}" +# Skipped Q028 @dataclass @@ -1888,7 +2129,7 @@ def code(self): return "Q029" def message(self) -> str: - return f'Writing injected SQL for node "{self.unique_id}"' + return f'Writing injected SQL for node "{self.node_info.unique_id}"' @dataclass @@ -1897,7 +2138,7 @@ def code(self): return "Q030" def message(self) -> str: - return f"Began compiling node {self.unique_id}" + return f"Began compiling node {self.node_info.unique_id}" @dataclass @@ -1906,11 +2147,11 @@ def code(self): return "Q031" def message(self) -> str: - return f"Began executing node {self.unique_id}" + return f"Began executing node {self.node_info.unique_id}" @dataclass -class PrintHookStartLine(InfoLevel, pt.PrintHookStartLine): # noqa +class LogHookStartLine(InfoLevel, pt.LogHookStartLine): # noqa def code(self): return "Q032" @@ -1922,7 +2163,7 @@ def message(self) -> str: @dataclass -class PrintHookEndLine(InfoLevel, pt.PrintHookEndLine): # noqa +class LogHookEndLine(InfoLevel, pt.LogHookEndLine): # noqa def code(self): return "Q033" @@ -1930,7 +2171,7 @@ def message(self) -> str: msg = "OK hook: {}".format(self.statement) return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=green(self.status), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1949,11 +2190,17 @@ def message(self) -> str: else: msg = f"SKIP {self.resource_type} {self.node_name}" return format_fancy_output_line( - msg=msg, status=ui.yellow("SKIP"), index=self.index, total=self.total + msg=msg, status=yellow("SKIP"), index=self.index, total=self.total ) -# Skipped Q035 +@dataclass +class NothingToDo(WarnLevel, pt.NothingToDo): + def code(self): + return "Q035" + + def message(self) -> str: + return "Nothing to do. Try checking your model configs and model specification args" @dataclass @@ -1974,6 +2221,15 @@ def message(self) -> str: return "Command end result" +@dataclass +class NoNodesSelected(WarnLevel, pt.NoNodesSelected): + def code(self): + return "Q038" + + def message(self) -> str: + return "No nodes selected!" 
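The change above collapses pairs of fixed-level events (for example, the old per-status result-line classes) into a single event whose log level is computed from the node's status. Below is a minimal, self-contained sketch of that dynamic-level pattern in plain Python; the names LogResult and fire are illustrative stand-ins, not dbt's actual proto-backed event classes or dispatcher.

from dataclasses import dataclass

LEVEL_LOOKUP = {
    "fail": "error",
    "pass": "info",
    "warn": "warn",
    "error": "error",
}


@dataclass
class LogResult:
    status: str
    description: str

    @classmethod
    def status_to_level(cls, status: str) -> str:
        # Unknown statuses fall back to "info", mirroring the lookup above.
        return LEVEL_LOOKUP.get(status, "info")

    def message(self) -> str:
        prefix = "ERROR creating" if self.status == "error" else "OK created"
        return f"{prefix} {self.description}"


def fire(event: LogResult) -> None:
    # A real dispatcher would route to a logger at the computed level;
    # printing keeps the sketch self-contained.
    level = event.status_to_level(event.status)
    print(f"[{level}] {event.message()}")


fire(LogResult(status="pass", description="model my_model"))   # [info] OK created ...
fire(LogResult(status="error", description="model my_model"))  # [error] ERROR creating ...

Keeping the status-to-level mapping in one lookup means a new status needs one dict entry rather than a whole new event class per level.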
+
+
 # =======================================================
 # W - Node testing
 # =======================================================
@@ -2003,7 +2259,7 @@ def message(self) -> str:
         """.strip()
 
         return "{prefix}\n{error}\n\n{note}".format(
-            prefix=ui.red(prefix), error=str(self.exc).strip(), note=internal_error_string
+            prefix=red(prefix), error=str(self.exc).strip(), note=internal_error_string
         )
 
 
@@ -2017,7 +2273,7 @@ def message(self) -> str:
         if node_description is None:
             node_description = self.unique_id
         prefix = "Unhandled error while executing {}".format(node_description)
-        return "{prefix}\n{error}".format(prefix=ui.red(prefix), error=str(self.exc).strip())
+        return "{prefix}\n{error}".format(prefix=red(prefix), error=str(self.exc).strip())
 
 
 @dataclass
@@ -2133,18 +2389,18 @@ def code(self):
         return "Z010"
 
     def message(self) -> str:
-        return "finished collecting timing info"
+        return f"Timing info for {self.node_info.unique_id} ({self.timing_info.name}): {self.timing_info.started_at} => {self.timing_info.completed_at}"
 
 
 # This prints the stack trace at the debug level while allowing just the nice exception message
 # at the error level - or whatever other level chosen. Used in multiple places.
 @dataclass
-class PrintDebugStackTrace(DebugLevel, pt.PrintDebugStackTrace):  # noqa
+class LogDebugStackTrace(DebugLevel, pt.LogDebugStackTrace):  # noqa
     def code(self):
         return "Z011"
 
     def message(self) -> str:
-        return ""
+        return f"{self.exc_info}"
 
 
 # We don't write "clean" events to the log, because the clean command
@@ -2241,7 +2497,7 @@ def code(self):
 
     def message(self) -> str:
         info = "Warning"
-        return ui.yellow(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
+        return yellow(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
 
 
 @dataclass
@@ -2251,7 +2507,7 @@ def code(self):
 
     def message(self) -> str:
         info = "Failure"
-        return ui.red(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
+        return red(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
 
 
 @dataclass
@@ -2270,6 +2526,7 @@ def code(self):
         return "Z024"
 
     def message(self) -> str:
+        # This is the message on the result object, cannot be built here
         return f"  {self.msg}"
 
 
@@ -2302,13 +2559,16 @@ def message(self) -> str:
         return f"  See test failures:\n  {border}\n  {msg}\n  {border}"
 
 
+# FirstRunResultError and AfterFirstRunResultError are just splitting the message from the result
+# object into multiple log lines
+# TODO: is this really needed? 
See printer.py @dataclass class FirstRunResultError(ErrorLevel, EventStringFunctor, pt.FirstRunResultError): def code(self): return "Z028" def message(self) -> str: - return ui.yellow(self.msg) + return yellow(self.msg) @dataclass @@ -2329,13 +2589,13 @@ def message(self) -> str: error_plural = pluralize(self.num_errors, "error") warn_plural = pluralize(self.num_warnings, "warning") if self.keyboard_interrupt: - message = ui.yellow("Exited because of keyboard interrupt.") + message = yellow("Exited because of keyboard interrupt.") elif self.num_errors > 0: - message = ui.red("Completed with {} and {}:".format(error_plural, warn_plural)) + message = red("Completed with {} and {}:".format(error_plural, warn_plural)) elif self.num_warnings > 0: - message = ui.yellow("Completed with {}:".format(warn_plural)) + message = yellow("Completed with {}:".format(warn_plural)) else: - message = ui.green("Completed successfully") + message = green("Completed successfully") return message @@ -2343,14 +2603,14 @@ def message(self) -> str: @dataclass -class PrintSkipBecauseError(ErrorLevel, pt.PrintSkipBecauseError): +class LogSkipBecauseError(ErrorLevel, pt.LogSkipBecauseError): def code(self): return "Z034" def message(self) -> str: msg = f"SKIP relation {self.schema}.{self.relation} due to ephemeral model error" return format_fancy_output_line( - msg=msg, status=ui.red("ERROR SKIP"), index=self.index, total=self.total + msg=msg, status=red("ERROR SKIP"), index=self.index, total=self.total ) @@ -2446,423 +2706,12 @@ def message(self) -> str: return "Got an exception trying to initialize tracking" -# Skipped Z045 - - -@dataclass -class GeneralWarningMsg(WarnLevel, EventStringFunctor, pt.GeneralWarningMsg): - def code(self): - return "Z046" - - def message(self) -> str: - return self.log_fmt.format(self.msg) if self.log_fmt is not None else self.msg - - -@dataclass -class GeneralWarningException(WarnLevel, pt.GeneralWarningException): - def code(self): - return "Z047" - - def message(self) -> str: - return self.log_fmt.format(str(self.exc)) if self.log_fmt is not None else str(self.exc) - - -@dataclass -class EventBufferFull(WarnLevel, pt.EventBufferFull): - def code(self): - return "Z048" - - def message(self) -> str: - return ( - "Internal logging/event buffer full." - "Earliest logs/events will be dropped as new ones are fired (FIFO)." - ) - - +# this is the message from the result object @dataclass class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarningMessage): def code(self): - return "Z049" + return "Z046" def message(self) -> str: + # This is the message on the result object, cannot be formatted in event return self.msg - - -# since mypy doesn't run on every file we need to suggest to mypy that every -# class gets instantiated. But we don't actually want to run this code. -# making the conditional `if False` causes mypy to skip it as dead code so -# we need to skirt around that by computing something it doesn't check statically. -# -# TODO remove these lines once we run mypy everywhere. 
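For context on the instantiation block removed below: as its own comments note, mypy treats the body of a literal `if False:` as unreachable dead code and skips type-checking it, so the constructor calls inside would never be verified; `1 == 0` is falsy only at runtime, which keeps the body visible to mypy. A small sketch of the trick, with MyEvent as an illustrative stand-in:

from dataclasses import dataclass


@dataclass
class MyEvent:
    path: str
    count: int


if 1 == 0:  # never runs, but mypy cannot prove it false, so the body is still type-checked
    MyEvent(path="", count=0)  # call signature checked by mypy
    # MyEvent(path=0)          # would be flagged: wrong argument type and missing "count"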
-if 1 == 0: - - # A - pre-project loading - MainReportVersion(version="") - MainReportArgs(args={}) - MainTrackingUserState(user_state="") - MergedFromState(num_merged=0, sample=[]) - MissingProfileTarget(profile_name="", target_name="") - InvalidVarsYAML() - DbtProjectError() - DbtProjectErrorException(exc="") - DbtProfileError() - DbtProfileErrorException(exc="") - ProfileListTitle() - ListSingleProfile(profile="") - NoDefinedProfiles() - ProfileHelpMessage() - StarterProjectPath(dir="") - ConfigFolderDirectory(dir="") - NoSampleProfileFound(adapter="") - ProfileWrittenWithSample(name="", path="") - ProfileWrittenWithTargetTemplateYAML(name="", path="") - ProfileWrittenWithProjectTemplateYAML(name="", path="") - SettingUpProfile() - InvalidProfileTemplateYAML() - ProjectNameAlreadyExists(name="") - ProjectCreated(project_name="") - - # E - DB Adapter ====================== - AdapterEventDebug() - AdapterEventInfo() - AdapterEventWarning() - AdapterEventError() - NewConnection(conn_type="", conn_name="") - ConnectionReused(conn_name="") - ConnectionLeftOpen(conn_name="") - ConnectionClosed(conn_name="") - RollbackFailed(conn_name="") - ConnectionClosed2(conn_name="") - ConnectionLeftOpen2(conn_name="") - Rollback(conn_name="") - CacheMiss(conn_name="", database="", schema="") - ListRelations(database="", schema="") - ConnectionUsed(conn_type="", conn_name="") - SQLQuery(conn_name="", sql="") - SQLQueryStatus(status="", elapsed=0.1) - SQLCommit(conn_name="") - ColTypeChange( - orig_type="", new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="") - ) - SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - UncachedRelation( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - AddLink( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - AddRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - DropMissingRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - DropCascade( - dropped=ReferenceKeyMsg(database="", schema="", identifier=""), - consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], - ) - DropRelation(dropped=ReferenceKeyMsg()) - UpdateReference( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - cached_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - TemporaryRelation(key=ReferenceKeyMsg(database="", schema="", identifier="")) - RenameSchema( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - DumpBeforeAddGraph(dump=dict()) - DumpAfterAddGraph(dump=dict()) - DumpBeforeRenameSchema(dump=dict()) - DumpAfterRenameSchema(dump=dict()) - AdapterImportError(exc="") - PluginLoadError(exc_info="") - NewConnectionOpening(connection_state="") - CodeExecution(conn_name="", code_content="") - CodeExecutionStatus(status="", elapsed=0.1) - WriteCatalogFailure(num_exceptions=0) - CatalogWritten(path="") - CannotGenerateDocs() - BuildingCatalog() - DatabaseErrorRunningHook(hook_type="") - HooksRunning(num_hooks=0, hook_type="") - HookFinished(stat_line="", execution="", execution_time=0) - - # I - Project parsing ====================== - ParseCmdStart() - 
ParseCmdCompiling() - ParseCmdWritingManifest() - ParseCmdDone() - ManifestDependenciesLoaded() - ManifestLoaderCreated() - ManifestLoaded() - ManifestChecked() - ManifestFlatGraphBuilt() - ParseCmdPerfInfoPath(path="") - GenericTestFileParse(path="") - MacroFileParse(path="") - PartialParsingFullReparseBecauseOfError() - PartialParsingExceptionFile(file="") - PartialParsingFile(file_id="") - PartialParsingException(exc_info={}) - PartialParsingSkipParsing() - PartialParsingMacroChangeStartFullParse() - PartialParsingProjectEnvVarsChanged() - PartialParsingProfileEnvVarsChanged() - PartialParsingDeletedMetric(unique_id="") - ManifestWrongMetadataVersion(version="") - PartialParsingVersionMismatch(saved_version="", current_version="") - PartialParsingFailedBecauseConfigChange() - PartialParsingFailedBecauseProfileChange() - PartialParsingFailedBecauseNewProjectDependency() - PartialParsingFailedBecauseHashChanged() - PartialParsingNotEnabled() - ParsedFileLoadFailed(path="", exc="", exc_info="") - PartialParseSaveFileNotFound() - StaticParserCausedJinjaRendering(path="") - UsingExperimentalParser(path="") - SampleFullJinjaRendering(path="") - StaticParserFallbackJinjaRendering(path="") - StaticParsingMacroOverrideDetected(path="") - StaticParserSuccess(path="") - StaticParserFailure(path="") - ExperimentalParserSuccess(path="") - ExperimentalParserFailure(path="") - PartialParsingEnabled(deleted=0, added=0, changed=0) - PartialParsingAddedFile(file_id="") - PartialParsingDeletedFile(file_id="") - PartialParsingUpdatedFile(file_id="") - PartialParsingNodeMissingInSourceFile(file_id="") - PartialParsingMissingNodes(file_id="") - PartialParsingChildMapMissingUniqueID(unique_id="") - PartialParsingUpdateSchemaFile(file_id="") - PartialParsingDeletedSource(unique_id="") - PartialParsingDeletedExposure(unique_id="") - InvalidDisabledSourceInTestNode(msg="") - InvalidRefInTestNode(msg="") - - # M - Deps generation ====================== - - GitSparseCheckoutSubdirectory(subdir="") - GitProgressCheckoutRevision(revision="") - GitProgressUpdatingExistingDependency(dir="") - GitProgressPullingNewDependency(dir="") - GitNothingToDo(sha="") - GitProgressUpdatedCheckoutRange(start_sha="", end_sha="") - GitProgressCheckedOutAt(end_sha="") - RegistryProgressGETRequest(url="") - RegistryProgressGETResponse(url="", resp_code=1234) - SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec="") - MacroEventInfo(msg="") - MacroEventDebug(msg="") - DepsNoPackagesFound() - DepsStartPackageInstall(package_name="") - DepsInstallInfo(version_name="") - DepsUpdateAvailable(version_latest="") - DepsUpToDate() - DepsListSubdirectory(subdirectory="") - DepsNotifyUpdatesAvailable(packages=ListOfStrings()) - RetryExternalCall(attempt=0, max=0) - RecordRetryException(exc="") - RegistryIndexProgressGETRequest(url="") - RegistryIndexProgressGETResponse(url="", resp_code=1234) - RegistryResponseUnexpectedType(response=""), - RegistryResponseMissingTopKeys(response=""), - RegistryResponseMissingNestedKeys(response=""), - RegistryResponseExtraNestedKeys(response=""), - DepsSetDownloadDirectory(path="") - - # Q - Node execution ====================== - - RunningOperationCaughtError(exc="") - CompileComplete() - FreshnessCheckComplete() - SeedHeader(header="") - SeedHeaderSeparator(len_header=0) - SQLRunnerException(exc="") - PrintErrorTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - ) - PrintPassTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - ) - 
PrintWarnTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - num_failures=0, - ) - PrintFailureTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - num_failures=0, - ) - PrintStartLine(description="", index=0, total=0, node_info=NodeInfo()) - PrintModelResultLine( - description="", - status="", - index=0, - total=0, - execution_time=0, - ) - PrintModelErrorResultLine( - description="", - status="", - index=0, - total=0, - execution_time=0, - ) - PrintSnapshotErrorResultLine( - status="", - description="", - cfg={}, - index=0, - total=0, - execution_time=0, - ) - PrintSnapshotResultLine( - status="", - description="", - cfg={}, - index=0, - total=0, - execution_time=0, - ) - PrintSeedErrorResultLine( - status="", - index=0, - total=0, - execution_time=0, - schema="", - relation="", - ) - PrintSeedResultLine( - status="", - index=0, - total=0, - execution_time=0, - schema="", - relation="", - ) - PrintFreshnessErrorLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessErrorStaleLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessWarnLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessPassLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintCancelLine(conn_name="") - DefaultSelector(name="") - NodeStart(unique_id="") - NodeFinished(unique_id="") - QueryCancelationUnsupported(type="") - ConcurrencyLine(num_threads=0, target_name="") - CompilingNode(unique_id="") - WritingInjectedSQLForNode(unique_id="") - NodeCompiling(unique_id="") - NodeExecuting(unique_id="") - PrintHookStartLine( - statement="", - index=0, - total=0, - ) - PrintHookEndLine( - statement="", - status="", - index=0, - total=0, - execution_time=0, - ) - SkippingDetails( - resource_type="", - schema="", - node_name="", - index=0, - total=0, - ) - RunningOperationUncaughtError(exc="") - EndRunResult() - - # W - Node testing ====================== - - CatchableExceptionOnRun(exc="") - InternalExceptionOnRun(build_path="", exc="") - GenericExceptionOnRun(build_path="", unique_id="", exc="") - NodeConnectionReleaseError(node_name="", exc="") - FoundStats(stat_line="") - - # Z - misc ====================== - - MainKeyboardInterrupt() - MainEncounteredError(exc="") - MainStackTrace(stack_trace="") - SystemErrorRetrievingModTime(path="") - SystemCouldNotWrite(path="", reason="", exc="") - SystemExecutingCmd(cmd=[""]) - SystemStdOutMsg(bmsg=b"") - SystemStdErrMsg(bmsg=b"") - SystemReportReturnCode(returncode=0) - TimingInfoCollected() - PrintDebugStackTrace() - CheckCleanPath(path="") - ConfirmCleanPath(path="") - ProtectedCleanPath(path="") - FinishedCleanPaths() - OpenCommand(open_cmd="", profiles_dir="") - EmptyLine() - ServingDocsPort(address="", port=0) - ServingDocsAccessInfo(port="") - ServingDocsExitInfo() - RunResultWarning(resource_type="", node_name="", path="") - RunResultFailure(resource_type="", node_name="", path="") - StatsLine(stats={}) - RunResultError(msg="") - RunResultErrorNoMessage(status="") - SQLCompiledPath(path="") - CheckNodeTestFailure(relation_name="") - FirstRunResultError(msg="") - AfterFirstRunResultError(msg="") - EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False) - PrintSkipBecauseError(schema="", relation="", index=0, total=0) - EnsureGitInstalled() - DepsCreatingLocalSymlink() - DepsSymlinkNotAvailable() - DisableTracking() - SendingEvent(kwargs="") - 
SendEventFailure() - FlushEvents() - FlushEventsFailure() - TrackingInitializeFailure() - GeneralWarningMsg(msg="", log_fmt="") - GeneralWarningException(exc="", log_fmt="") - EventBufferFull() diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index db824e19bf1..515ec86054b 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -1,24 +1,29 @@ import builtins -import functools -from typing import NoReturn, Optional, Mapping, Any - -from dbt.events.functions import fire_event, scrub_secrets, env_secrets -from dbt.events.types import GeneralWarningMsg, GeneralWarningException +import json +import re +from typing import Any, Dict, List, Mapping, NoReturn, Optional, Union + +# from dbt.contracts.graph import ManifestNode # or ParsedNode? +from dbt.dataclass_schema import ValidationError +from dbt.events.functions import warn_or_error +from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.types import JinjaLogWarning +from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType -from dbt import flags -from dbt.ui import line_wrap_message, warning_tag +from dbt.ui import line_wrap_message import dbt.dataclass_schema -def validator_error_message(exc): - """Given a dbt.dataclass_schema.ValidationError (which is basically a - jsonschema.ValidationError), return the relevant parts as a string +class MacroReturn(builtins.BaseException): """ - if not isinstance(exc, dbt.dataclass_schema.ValidationError): - return str(exc) - path = "[%s]" % "][".join(map(repr, exc.relative_path)) - return "at path {}: {}".format(path, exc.message) + Hack of all hacks + This is not actually an exception. + It's how we return a value from a macro. + """ + + def __init__(self, value): + self.value = value class Exception(builtins.Exception): @@ -33,25 +38,53 @@ def data(self): } -class MacroReturn(builtins.BaseException): - """ - Hack of all hacks - """ +class InternalException(Exception): + def __init__(self, msg: str): + self.stack: List = [] + self.msg = scrub_secrets(msg, env_secrets()) - def __init__(self, value): - self.value = value + @property + def type(self): + return "Internal" + def process_stack(self): + lines = [] + stack = self.stack + first = True -class InternalException(Exception): - pass + if len(stack) > 1: + lines.append("") + + for item in stack: + msg = "called by" + + if first: + msg = "in" + first = False + + lines.append(f"> {msg}") + + return lines + + def __str__(self): + if hasattr(self.msg, "split"): + split_msg = self.msg.split("\n") + else: + split_msg = str(self.msg).split("\n") + + lines = ["{}".format(self.type + " Error")] + split_msg + + lines += self.process_stack() + + return lines[0] + "\n" + "\n".join([" " + line for line in lines[1:]]) class RuntimeException(RuntimeError, Exception): CODE = 10001 MESSAGE = "Runtime error" - def __init__(self, msg, node=None): - self.stack = [] + def __init__(self, msg: str, node=None): + self.stack: List = [] self.node = node self.msg = scrub_secrets(msg, env_secrets()) @@ -70,14 +103,14 @@ def node_to_string(self, node): return "" if not hasattr(node, "name"): # we probably failed to parse a block, so we can't know the name - return "{} ({})".format(node.resource_type, node.original_file_path) + return f"{node.resource_type} ({node.original_file_path})" if hasattr(node, "contents"): # handle FileBlocks. They aren't really nodes but we want to render # out the path we know at least. This indicates an error during # block parsing. 
- return "{}".format(node.path.original_file_path) - return "{} {} ({})".format(node.resource_type, node.name, node.original_file_path) + return f"{node.path.original_file_path}" + return f"{node.resource_type} {node.name} ({node.original_file_path})" def process_stack(self): lines = [] @@ -94,15 +127,24 @@ def process_stack(self): msg = "in" first = False - lines.append("> {} {}".format(msg, self.node_to_string(item))) + lines.append(f"> {msg} {self.node_to_string(item)}") return lines - def __str__(self, prefix="! "): + def validator_error_message(self, exc: builtins.Exception): + """Given a dbt.dataclass_schema.ValidationError (which is basically a + jsonschema.ValidationError), return the relevant parts as a string + """ + if not isinstance(exc, dbt.dataclass_schema.ValidationError): + return str(exc) + path = "[%s]" % "][".join(map(repr, exc.relative_path)) + return f"at path {path}: {exc.message}" + + def __str__(self, prefix: str = "! "): node_string = "" if self.node is not None: - node_string = " in {}".format(self.node_to_string(self.node)) + node_string = f" in {self.node_to_string(self.node)}" if hasattr(self.msg, "split"): split_msg = self.msg.split("\n") @@ -139,7 +181,7 @@ class RPCTimeoutException(RuntimeException): CODE = 10008 MESSAGE = "RPC timeout error" - def __init__(self, timeout): + def __init__(self, timeout: Optional[float]): super().__init__(self.MESSAGE) self.timeout = timeout @@ -148,7 +190,7 @@ def data(self): result.update( { "timeout": self.timeout, - "message": "RPC timed out after {}s".format(self.timeout), + "message": f"RPC timed out after {self.timeout}s", } ) return result @@ -158,15 +200,15 @@ class RPCKilledException(RuntimeException): CODE = 10009 MESSAGE = "RPC process killed" - def __init__(self, signum): + def __init__(self, signum: int): self.signum = signum - self.message = "RPC process killed by signal {}".format(self.signum) - super().__init__(self.message) + self.msg = f"RPC process killed by signal {self.signum}" + super().__init__(self.msg) def data(self): return { "signum": self.signum, - "message": self.message, + "message": self.msg, } @@ -174,7 +216,7 @@ class RPCCompiling(RuntimeException): CODE = 10010 MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" - def __init__(self, msg=None, node=None): + def __init__(self, msg: str = None, node=None): if msg is None: msg = "compile in progress" super().__init__(msg, node) @@ -186,13 +228,13 @@ class RPCLoadException(RuntimeException): 'RPC server failed to compile project, call the "status" method for' " compile status" ) - def __init__(self, cause): + def __init__(self, cause: Dict[str, Any]): self.cause = cause - self.message = "{}: {}".format(self.MESSAGE, self.cause["message"]) - super().__init__(self.message) + self.msg = f'{self.MESSAGE}: {self.cause["message"]}' + super().__init__(self.msg) def data(self): - return {"cause": self.cause, "message": self.message} + return {"cause": self.cause, "message": self.msg} class DatabaseException(RuntimeException): @@ -203,7 +245,7 @@ def process_stack(self): lines = [] if hasattr(self.node, "build_path") and self.node.build_path: - lines.append("compiled Code at {}".format(self.node.build_path)) + lines.append(f"compiled Code at {self.node.build_path}") return lines + RuntimeException.process_stack(self) @@ -220,6 +262,17 @@ class CompilationException(RuntimeException): def type(self): return "Compilation" + def _fix_dupe_msg(self, path_1: str, path_2: str, name: str, type_name: str) -> str: + if 
path_1 == path_2: + return ( + f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" + ) + else: + return ( + f"remove the {type_name} entry for {name} in one of these files:\n" + f" - {path_1!s}\n{path_2!s}" + ) + class RecursionException(RuntimeException): pass @@ -239,14 +292,13 @@ def type(self): return "Parsing" +# TODO: this isn't raised in the core codebase. Is it raised elsewhere? class JSONValidationException(ValidationException): def __init__(self, typename, errors): self.typename = typename self.errors = errors self.errors_message = ", ".join(errors) - msg = 'Invalid arguments passed to "{}" instance: {}'.format( - self.typename, self.errors_message - ) + msg = f'Invalid arguments passed to "{self.typename}" instance: {self.errors_message}' super().__init__(msg) def __reduce__(self): @@ -260,7 +312,7 @@ def __init__(self, expected: str, found: Optional[str]): self.found = found self.filename = "input file" - super().__init__(self.get_message()) + super().__init__(msg=self.get_message()) def add_filename(self, filename: str): self.filename = filename @@ -287,7 +339,7 @@ class JinjaRenderingException(CompilationException): class UndefinedMacroException(CompilationException): - def __str__(self, prefix="! ") -> str: + def __str__(self, prefix: str = "! ") -> str: msg = super().__str__(prefix) return ( f"{msg}. This can happen when calling a macro that does " @@ -304,7 +356,7 @@ def __init__(self, task_id): self.task_id = task_id def __str__(self): - return "{}: {}".format(self.MESSAGE, self.task_id) + return f"{self.MESSAGE}: {self.task_id}" class AliasException(ValidationException): @@ -321,9 +373,9 @@ class DbtConfigError(RuntimeException): CODE = 10007 MESSAGE = "DBT Configuration Error" - def __init__(self, message, project=None, result_type="invalid_project", path=None): + def __init__(self, msg: str, project=None, result_type="invalid_project", path=None): self.project = project - super().__init__(message) + super().__init__(msg) self.result_type = result_type self.path = path @@ -339,8 +391,8 @@ class FailFastException(RuntimeException): CODE = 10013 MESSAGE = "FailFast Error" - def __init__(self, message, result=None, node=None): - super().__init__(msg=message, node=node) + def __init__(self, msg: str, result=None, node=None): + super().__init__(msg=msg, node=node) self.result = result @property @@ -361,7 +413,7 @@ class DbtProfileError(DbtConfigError): class SemverException(Exception): - def __init__(self, msg=None): + def __init__(self, msg: str = None): self.msg = msg if msg is not None: super().__init__(msg) @@ -374,7 +426,10 @@ class VersionsNotCompatibleException(SemverException): class NotImplementedException(Exception): - pass + def __init__(self, msg: str): + self.msg = msg + self.formatted_msg = f"ERROR: {self.msg}" + super().__init__(self.formatted_msg) class FailedToConnectException(DatabaseException): @@ -382,52 +437,58 @@ class FailedToConnectException(DatabaseException): class CommandError(RuntimeException): - def __init__(self, cwd, cmd, message="Error running command"): + def __init__(self, cwd: str, cmd: List[str], msg: str = "Error running command"): cmd_scrubbed = list(scrub_secrets(cmd_txt, env_secrets()) for cmd_txt in cmd) - super().__init__(message) + super().__init__(msg) self.cwd = cwd self.cmd = cmd_scrubbed - self.args = (cwd, cmd_scrubbed, message) + self.args = (cwd, cmd_scrubbed, msg) def __str__(self): if len(self.cmd) == 0: - return "{}: No arguments given".format(self.msg) - return '{}: "{}"'.format(self.msg, 
self.cmd[0])
+        return f"{self.msg}: No arguments given"
+        return f'{self.msg}: "{self.cmd[0]}"'
 
 
 class ExecutableError(CommandError):
-    def __init__(self, cwd, cmd, message):
-        super().__init__(cwd, cmd, message)
+    def __init__(self, cwd: str, cmd: List[str], msg: str):
+        super().__init__(cwd, cmd, msg)
 
 
 class WorkingDirectoryError(CommandError):
-    def __init__(self, cwd, cmd, message):
-        super().__init__(cwd, cmd, message)
+    def __init__(self, cwd: str, cmd: List[str], msg: str):
+        super().__init__(cwd, cmd, msg)
 
     def __str__(self):
-        return '{}: "{}"'.format(self.msg, self.cwd)
+        return f'{self.msg}: "{self.cwd}"'
 
 
 class CommandResultError(CommandError):
-    def __init__(self, cwd, cmd, returncode, stdout, stderr, message="Got a non-zero returncode"):
-        super().__init__(cwd, cmd, message)
+    def __init__(
+        self,
+        cwd: str,
+        cmd: List[str],
+        returncode: Union[int, Any],
+        stdout: bytes,
+        stderr: bytes,
+        msg: str = "Got a non-zero returncode",
+    ):
+        super().__init__(cwd, cmd, msg)
         self.returncode = returncode
         self.stdout = scrub_secrets(stdout.decode("utf-8"), env_secrets())
         self.stderr = scrub_secrets(stderr.decode("utf-8"), env_secrets())
-        self.args = (cwd, self.cmd, returncode, self.stdout, self.stderr, message)
+        self.args = (cwd, self.cmd, returncode, self.stdout, self.stderr, msg)
 
     def __str__(self):
-        return "{} running: {}".format(self.msg, self.cmd)
+        return f"{self.msg} running: {self.cmd}"
 
 
 class InvalidConnectionException(RuntimeException):
-    def __init__(self, thread_id, known, node=None):
+    def __init__(self, thread_id, known: List):
         self.thread_id = thread_id
         self.known = known
         super().__init__(
-            msg="connection never acquired for thread {}, have {}".format(
-                self.thread_id, self.known
-            )
+            msg=f"connection never acquired for thread {self.thread_id}, have {self.known}"
         )
 
 
@@ -441,694 +502,1874 @@ class DuplicateYamlKeyException(CompilationException):
     pass
 
 
-def raise_compiler_error(msg, node=None) -> NoReturn:
-    raise CompilationException(msg, node)
+class ConnectionException(Exception):
+    """
+    There was a problem with the connection that returned a bad response,
+    timed out, or resulted in a file that is corrupt.
+    """
 
+    pass
 
-def raise_parsing_error(msg, node=None) -> NoReturn:
-    raise ParsingException(msg, node)
 
+# event level exception
+class EventCompilationException(CompilationException):
+    def __init__(self, msg: str, node):
+        self.msg = scrub_secrets(msg, env_secrets())
+        self.node = node
+        super().__init__(msg=self.msg)
 
-def raise_database_error(msg, node=None) -> NoReturn:
-    raise DatabaseException(msg, node)
 
+# compilation level exceptions
+class GraphDependencyNotFound(CompilationException):
+    def __init__(self, node, dependency: str):
+        self.node = node
+        self.dependency = dependency
+        super().__init__(msg=self.get_message())
 
-def raise_dependency_error(msg) -> NoReturn:
-    raise DependencyException(scrub_secrets(msg, env_secrets()))
 
+    def get_message(self) -> str:
+        msg = f"'{self.node.unique_id}' depends on '{self.dependency}' which is not in the graph!" 
+ return msg -def raise_git_cloning_error(error: CommandResultError) -> NoReturn: - error.cmd = scrub_secrets(str(error.cmd), env_secrets()) - raise error +# client level exceptions -def raise_git_cloning_problem(repo) -> NoReturn: - repo = scrub_secrets(repo, env_secrets()) - msg = """\ - Something went wrong while cloning {} - Check the debug logs for more information - """ - raise RuntimeException(msg.format(repo)) +class NoSupportedLanguagesFound(CompilationException): + def __init__(self, node): + self.node = node + self.msg = f"No supported_languages found in materialization macro {self.node.name}" + super().__init__(msg=self.msg) -def disallow_secret_env_var(env_var_name) -> NoReturn: - """Raise an error when a secret env var is referenced outside allowed - rendering contexts""" - msg = ( - "Secret env vars are allowed only in profiles.yml or packages.yml. " - "Found '{env_var_name}' referenced elsewhere." - ) - raise_parsing_error(msg.format(env_var_name=env_var_name)) +class MaterializtionMacroNotUsed(CompilationException): + def __init__(self, node): + self.node = node + self.msg = "Only materialization macros can be used with this function" + super().__init__(msg=self.msg) -def invalid_type_error( - method_name, arg_name, got_value, expected_type, version="0.13.0" -) -> NoReturn: - """Raise a CompilationException when an adapter method available to macros - has changed. - """ - got_type = type(got_value) - msg = ( - "As of {version}, 'adapter.{method_name}' expects argument " - "'{arg_name}' to be of type '{expected_type}', instead got " - "{got_value} ({got_type})" - ) - raise_compiler_error( - msg.format( - version=version, - method_name=method_name, - arg_name=arg_name, - expected_type=expected_type, - got_value=got_value, - got_type=got_type, - ) - ) +class UndefinedCompilation(CompilationException): + def __init__(self, name: str, node): + self.name = name + self.node = node + self.msg = f"{self.name} is undefined" + super().__init__(msg=self.msg) -def invalid_bool_error(got_value, macro_name) -> NoReturn: - """Raise a CompilationException when a macro expects a boolean but gets some - other value. - """ - msg = ( - "Macro '{macro_name}' returns '{got_value}'. It is not type 'bool' " - "and cannot not be converted reliably to a bool." - ) - raise_compiler_error(msg.format(macro_name=macro_name, got_value=got_value)) +class CaughtMacroExceptionWithNode(CompilationException): + def __init__(self, exc, node): + self.exc = exc + self.node = node + super().__init__(msg=str(exc)) -def ref_invalid_args(model, args) -> NoReturn: - raise_compiler_error("ref() takes at most two arguments ({} given)".format(len(args)), model) +class CaughtMacroException(CompilationException): + def __init__(self, exc): + self.exc = exc + super().__init__(msg=str(exc)) -def metric_invalid_args(model, args) -> NoReturn: - raise_compiler_error( - "metric() takes at most two arguments ({} given)".format(len(args)), model - ) +class MacroNameNotString(CompilationException): + def __init__(self, kwarg_value): + self.kwarg_value = kwarg_value + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = ( + f"The macro_name parameter ({self.kwarg_value}) " + "to adapter.dispatch was not a string" + ) + return msg -def ref_bad_context(model, args) -> NoReturn: - ref_args = ", ".join("'{}'".format(a) for a in args) - ref_string = "{{{{ ref({}) }}}}".format(ref_args) - base_error_msg = """dbt was unable to infer all dependencies for the model "{model_name}". 
-This typically happens when ref() is placed within a conditional block.
+class MissingControlFlowStartTag(CompilationException):
+    def __init__(self, tag, expected_tag: str, tag_parser):
+        self.tag = tag
+        self.expected_tag = expected_tag
+        self.tag_parser = tag_parser
+        super().__init__(msg=self.get_message())
 
-To fix this, add the following hint to the top of the model "{model_name}":
+    def get_message(self) -> str:
+        linepos = self.tag_parser.linepos(self.tag.start)
+        msg = (
+            f"Got an unexpected control flow end tag, got {self.tag.block_type_name} but "
+            f"expected {self.expected_tag} next (@ {linepos})"
+        )
+        return msg
 
--- depends_on: {ref_string}"""
-    # This explicitly references model['name'], instead of model['alias'], for
-    # better error messages. Ex. If models foo_users and bar_users are aliased
-    # to 'users', in their respective schemas, then you would want to see
-    # 'bar_users' in your error messge instead of just 'users'.
-    if isinstance(model, dict):  # TODO: remove this path
-        model_name = model["name"]
-        model_path = model["path"]
-    else:
-        model_name = model.name
-        model_path = model.path
-    error_msg = base_error_msg.format(
-        model_name=model_name, model_path=model_path, ref_string=ref_string
-    )
-    raise_compiler_error(error_msg, model)
 
+class UnexpectedControlFlowEndTag(CompilationException):
+    def __init__(self, tag, expected_tag: str, tag_parser):
+        self.tag = tag
+        self.expected_tag = expected_tag
+        self.tag_parser = tag_parser
+        super().__init__(msg=self.get_message())
 
-def doc_invalid_args(model, args) -> NoReturn:
-    raise_compiler_error("doc() takes at most two arguments ({} given)".format(len(args)), model)
+    def get_message(self) -> str:
+        linepos = self.tag_parser.linepos(self.tag.start)
+        msg = (
+            f"Got an unexpected control flow end tag, got {self.tag.block_type_name} but "
+            f"never saw a preceding {self.expected_tag} (@ {linepos})"
+        )
+        return msg
 
 
-def doc_target_not_found(
-    model, target_doc_name: str, target_doc_package: Optional[str]
-) -> NoReturn:
-    target_package_string = ""
+class UnexpectedMacroEOF(CompilationException):
+    def __init__(self, expected_name: str, actual_name: str):
+        self.expected_name = expected_name
+        self.actual_name = actual_name
+        super().__init__(msg=self.get_message())
 
-    if target_doc_package is not None:
-        target_package_string = "in package '{}' ".format(target_doc_package)
+    def get_message(self) -> str:
+        msg = f'unexpected EOF, expected {self.expected_name}, got "{self.actual_name}"'
+        return msg
 
-    msg = ("Documentation for '{}' depends on doc '{}' {} which was not found").format(
-        model.unique_id, target_doc_name, target_package_string
-    )
-    raise_compiler_error(msg, model)
 
+class MacroNamespaceNotString(CompilationException):
+    def __init__(self, kwarg_type: Any):
+        self.kwarg_type = kwarg_type
+        super().__init__(msg=self.get_message())
 
-def _get_target_failure_msg(
-    original_file_path,
-    unique_id,
-    resource_type_title,
-    target_name: str,
-    target_model_package: Optional[str],
-    include_path: bool,
-    reason: str,
-    target_kind: str,
-) -> str:
-    target_package_string = ""
-    if target_model_package is not None:
-        target_package_string = "in package '{}' ".format(target_model_package)
-
-    source_path_string = ""
-    if include_path:
-        source_path_string = " ({})".format(original_file_path)
-
-    return "{} '{}'{} depends on a {} named '{}' {}which {}".format(
-        resource_type_title,
-        unique_id,
-        source_path_string,
-        target_kind,
-        target_name,
-        target_package_string,
-        reason,
-    )
 
+    def get_message(self) -> 
str: + msg = ( + "The macro_namespace parameter to adapter.dispatch " + f"is a {self.kwarg_type}, not a string" + ) + return msg -def get_target_not_found_or_disabled_msg( - node, - target_name: str, - target_package: Optional[str], - disabled: Optional[bool] = None, -) -> str: - if disabled is None: - reason = "was not found or is disabled" - elif disabled is True: - reason = "is disabled" - else: - reason = "was not found" - return _get_target_failure_msg( - node.original_file_path, - node.unique_id, - node.resource_type.title(), - target_name, - target_package, - include_path=True, - reason=reason, - target_kind="node", - ) +class NestedTags(CompilationException): + def __init__(self, outer, inner): + self.outer = outer + self.inner = inner + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = ( + f"Got nested tags: {self.outer.block_type_name} (started at {self.outer.start}) did " + f"not have a matching {{{{% end{self.outer.block_type_name} %}}}} before a " + f"subsequent {self.inner.block_type_name} was found (started at {self.inner.start})" + ) + return msg -def ref_target_not_found( - model, - target_model_name: str, - target_model_package: Optional[str], - disabled: Optional[bool] = None, -) -> NoReturn: - msg = get_target_not_found_or_disabled_msg( - model, target_model_name, target_model_package, disabled - ) - raise_compiler_error(msg, model) +class BlockDefinitionNotAtTop(CompilationException): + def __init__(self, tag_parser, tag_start): + self.tag_parser = tag_parser + self.tag_start = tag_start + super().__init__(msg=self.get_message()) -def get_not_found_or_disabled_msg( - node, - target_name: str, - target_kind: str, - target_package: Optional[str] = None, - disabled: Optional[bool] = None, -) -> str: - if disabled is None: - reason = "was not found or is disabled" - elif disabled is True: - reason = "is disabled" - else: - reason = "was not found" - return _get_target_failure_msg( - node.original_file_path, - node.unique_id, - node.resource_type.title(), - target_name, - target_package, - include_path=True, - reason=reason, - target_kind=target_kind, - ) + def get_message(self) -> str: + position = self.tag_parser.linepos(self.tag_start) + msg = ( + f"Got a block definition inside control flow at {position}. 
" + "All dbt block definitions must be at the top level" + ) + return msg -def target_not_found( - node, - target_name: str, - target_kind: str, - target_package: Optional[str] = None, - disabled: Optional[bool] = None, -) -> NoReturn: - msg = get_not_found_or_disabled_msg( - node=node, - target_name=target_name, - target_kind=target_kind, - target_package=target_package, - disabled=disabled, - ) +class MissingCloseTag(CompilationException): + def __init__(self, block_type_name: str, linecount: int): + self.block_type_name = block_type_name + self.linecount = linecount + super().__init__(msg=self.get_message()) - raise_compiler_error(msg, node) + def get_message(self) -> str: + msg = f"Reached EOF without finding a close tag for {self.block_type_name} (searched from line {self.linecount})" + return msg -def dependency_not_found(model, target_model_name): - raise_compiler_error( - "'{}' depends on '{}' which is not in the graph!".format( - model.unique_id, target_model_name - ), - model, - ) +class GitCloningProblem(RuntimeException): + def __init__(self, repo: str): + self.repo = scrub_secrets(repo, env_secrets()) + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"""\ + Something went wrong while cloning {self.repo} + Check the debug logs for more information + """ + return msg -def macro_not_found(model, target_macro_id): - raise_compiler_error( - model, - "'{}' references macro '{}' which is not defined!".format( - model.unique_id, target_macro_id - ), - ) +class GitCloningError(InternalException): + def __init__(self, repo: str, revision: str, error: CommandResultError): + self.repo = repo + self.revision = revision + self.error = error + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + stderr = self.error.stderr.strip() + if "usage: git" in stderr: + stderr = stderr.split("\nusage: git")[0] + if re.match("fatal: destination path '(.+)' already exists", stderr): + self.error.cmd = list(scrub_secrets(str(self.error.cmd), env_secrets())) + raise self.error -def macro_invalid_dispatch_arg(macro_name) -> NoReturn: - msg = """\ - The "packages" argument of adapter.dispatch() has been deprecated. - Use the "macro_namespace" argument instead. + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{stderr}" + return scrub_secrets(msg, env_secrets()) - Raised during dispatch for: {} - For more information, see: +class GitCheckoutError(InternalException): + def __init__(self, repo: str, revision: str, error: CommandResultError): + self.repo = repo + self.revision = revision + self.stderr = error.stderr.strip() + super().__init__(msg=self.get_message()) - https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch - """ - raise_compiler_error(msg.format(macro_name)) + def get_message(self) -> str: + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" + return scrub_secrets(msg, env_secrets()) -def materialization_not_available(model, adapter_type): - materialization = model.get_materialization() +class InvalidMaterializationArg(CompilationException): + def __init__(self, name: str, argument: str): + self.name = name + self.argument = argument + super().__init__(msg=self.get_message()) - raise_compiler_error( - "Materialization '{}' is not available for {}!".format(materialization, adapter_type), - model, - ) + def get_message(self) -> str: + msg = f"materialization '{self.name}' received unknown argument '{self.argument}'." 
+ return msg -def missing_materialization(model, adapter_type): - materialization = model.get_materialization() +class SymbolicLinkError(CompilationException): + def __init__(self): + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "dbt encountered an error when attempting to create a symbolic link. " + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" + ) - valid_types = "'default'" + return msg - if adapter_type != "default": - valid_types = "'default' and '{}'".format(adapter_type) - raise_compiler_error( - "No materialization '{}' was found for adapter {}! (searched types {})".format( - materialization, adapter_type, valid_types - ), - model, - ) +# context level exceptions -def bad_package_spec(repo, spec, error_message): - msg = "Error checking out spec='{}' for repo {}\n{}".format(spec, repo, error_message) - raise InternalException(scrub_secrets(msg, env_secrets())) +class ZipStrictWrongType(CompilationException): + def __init__(self, exc): + self.exc = exc + msg = str(self.exc) + super().__init__(msg=msg) -def raise_cache_inconsistent(message): - raise InternalException("Cache inconsistency detected: {}".format(message)) +class SetStrictWrongType(CompilationException): + def __init__(self, exc): + self.exc = exc + msg = str(self.exc) + super().__init__(msg=msg) -def missing_config(model, name): - raise_compiler_error( - "Model '{}' does not define a required config parameter '{}'.".format( - model.unique_id, name - ), - model, - ) +class LoadAgateTableValueError(CompilationException): + def __init__(self, exc: ValueError, node): + self.exc = exc + self.node = node + msg = str(self.exc) + super().__init__(msg=msg) -def missing_relation(relation, model=None): - raise_compiler_error("Relation {} not found!".format(relation), model) +class LoadAgateTableNotSeed(CompilationException): + def __init__(self, resource_type, node): + self.resource_type = resource_type + self.node = node + msg = f"can only load_agate_table for seeds (got a {self.resource_type})" + super().__init__(msg=msg) -def raise_dataclass_not_dict(obj): - msg = ( - 'The object ("{obj}") was used as a dictionary. This ' - "capability has been removed from objects of this type." - ) - raise_compiler_error(msg) +class MacrosSourcesUnWriteable(CompilationException): + def __init__(self, node): + self.node = node + msg = 'cannot "write" macros or sources' + super().__init__(msg=msg) -def relation_wrong_type(relation, expected_type, model=None): - raise_compiler_error( - ( - "Trying to create {expected_type} {relation}, " - "but it currently exists as a {current_type}. Either " - "drop {relation} manually, or run dbt with " - "`--full-refresh` and dbt will drop it for you." - ).format(relation=relation, current_type=relation.type, expected_type=expected_type), - model, - ) +class PackageNotInDeps(CompilationException): + def __init__(self, package_name: str, node): + self.package_name = package_name + self.node = node + msg = f"Node package named {self.package_name} not found!" 
+ super().__init__(msg=msg) -def package_not_found(package_name): - raise_dependency_error("Package {} was not found in the package index".format(package_name)) +class OperationsCannotRefEphemeralNodes(CompilationException): + def __init__(self, target_name: str, node): + self.target_name = target_name + self.node = node + msg = f"Operations can not ref() ephemeral nodes, but {target_name} is ephemeral" + super().__init__(msg=msg) -def package_version_not_found( - package_name, version_range, available_versions, should_version_check -): - base_msg = ( - "Could not find a matching compatible version for package {}\n" - " Requested range: {}\n" - " Compatible versions: {}\n" - ) - addendum = ( - ( - "\n" - " Not shown: package versions incompatible with installed version of dbt-core\n" - " To include them, run 'dbt --no-version-check deps'" +class InvalidPersistDocsValueType(CompilationException): + def __init__(self, persist_docs: Any): + self.persist_docs = persist_docs + msg = ( + "Invalid value provided for 'persist_docs'. Expected dict " + f"but received {type(self.persist_docs)}" ) - if should_version_check - else "" - ) - msg = base_msg.format(package_name, version_range, available_versions) + addendum - raise_dependency_error(msg) - + super().__init__(msg=msg) -def invalid_materialization_argument(name, argument): - raise_compiler_error( - "materialization '{}' received unknown argument '{}'.".format(name, argument) - ) +class InvalidInlineModelConfig(CompilationException): + def __init__(self, node): + self.node = node + msg = "Invalid inline model config" + super().__init__(msg=msg) -def system_error(operation_name): - raise_compiler_error( - "dbt encountered an error when attempting to {}. " - "If this error persists, please create an issue at: \n\n" - "https://github.com/dbt-labs/dbt-core".format(operation_name) - ) +class ConflictingConfigKeys(CompilationException): + def __init__(self, oldkey: str, newkey: str, node): + self.oldkey = oldkey + self.newkey = newkey + self.node = node + msg = f'Invalid config, has conflicting keys "{self.oldkey}" and "{self.newkey}"' + super().__init__(msg=msg) -class ConnectionException(Exception): - """ - There was a problem with the connection that returned a bad response, - timed out, or resulted in a file that is corrupt. - """ - pass +class InvalidNumberSourceArgs(CompilationException): + def __init__(self, args, node): + self.args = args + self.node = node + msg = f"source() takes exactly two arguments ({len(self.args)} given)" + super().__init__(msg=msg) -def raise_dep_not_found(node, node_description, required_pkg): - raise_compiler_error( - 'Error while parsing {}.\nThe required package "{}" was not found. ' - "Is the package installed?\nHint: You may need to run " - "`dbt deps`.".format(node_description, required_pkg), - node=node, - ) +class RequiredVarNotFound(CompilationException): + def __init__(self, var_name: str, merged: Dict, node): + self.var_name = var_name + self.merged = merged + self.node = node + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + if self.node is not None: + node_name = self.node.name + else: + node_name = "" -def multiple_matching_relations(kwargs, matches): - raise_compiler_error( - "get_relation returned more than one relation with the given args. " - "Please specify a database or schema to narrow down the result set." 
- "\n{}\n\n{}".format(kwargs, matches) - ) + dct = {k: self.merged[k] for k in self.merged} + pretty_vars = json.dumps(dct, sort_keys=True, indent=4) + msg = f"Required var '{self.var_name}' not found in config:\nVars supplied to {node_name} = {pretty_vars}" + return msg -def get_relation_returned_multiple_results(kwargs, matches): - multiple_matching_relations(kwargs, matches) +class PackageNotFoundForMacro(CompilationException): + def __init__(self, package_name: str): + self.package_name = package_name + msg = f"Could not find package '{self.package_name}'" + super().__init__(msg=msg) -def approximate_relation_match(target, relation): - raise_compiler_error( - "When searching for a relation, dbt found an approximate match. " - "Instead of guessing \nwhich relation to use, dbt will move on. " - "Please delete {relation}, or rename it to be less ambiguous." - "\nSearched for: {target}\nFound: {relation}".format(target=target, relation=relation) - ) +class DisallowSecretEnvVar(ParsingException): + def __init__(self, env_var_name: str): + self.env_var_name = env_var_name + super().__init__(msg=self.get_message()) -def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: - duped_name = node_1.name - if node_1.package_name != node_2.package_name: - extra = ' ("{}" and "{}" are both in the "{}" namespace)'.format( - node_1.package_name, node_2.package_name, namespace - ) - else: - extra = "" - - raise_compiler_error( - 'dbt found two macros with the name "{}" in the namespace "{}"{}. ' - "Since these macros have the same name and exist in the same " - "namespace, dbt will be unable to decide which to call. To fix this, " - "change the name of one of these macros:\n- {} ({})\n- {} ({})".format( - duped_name, - namespace, - extra, - node_1.unique_id, - node_1.original_file_path, - node_2.unique_id, - node_2.original_file_path, + def get_message(self) -> str: + msg = ( + "Secret env vars are allowed only in profiles.yml or packages.yml. " + f"Found '{self.env_var_name}' referenced elsewhere." ) - ) - + return msg -def raise_duplicate_resource_name(node_1, node_2): - duped_name = node_1.name - node_type = NodeType(node_1.resource_type) - pluralized = ( - node_type.pluralize() - if node_1.resource_type == node_2.resource_type - else "resources" # still raise if ref() collision, e.g. model + seed - ) - action = "looking for" - # duplicate 'ref' targets - if node_type in NodeType.refable(): - formatted_name = f'ref("{duped_name}")' - # duplicate sources - elif node_type == NodeType.Source: - duped_name = node_1.get_full_source_name() - formatted_name = node_1.get_source_representation() - # duplicate docs blocks - elif node_type == NodeType.Documentation: - formatted_name = f'doc("{duped_name}")' - # duplicate generic tests - elif node_type == NodeType.Test and hasattr(node_1, "test_metadata"): - column_name = f'column "{node_1.column_name}" in ' if node_1.column_name else "" - model_name = node_1.file_key_name - duped_name = f'{node_1.name}" defined on {column_name}"{model_name}' - action = "running" - formatted_name = "tests" - # all other resource types - else: - formatted_name = duped_name - - # should this be raise_parsing_error instead? - raise_compiler_error( - f""" -dbt found two {pluralized} with the name "{duped_name}". 

+class InvalidMacroArgType(CompilationException):
+    def __init__(self, method_name: str, arg_name: str, got_value: Any, expected_type):
+        self.method_name = method_name
+        self.arg_name = arg_name
+        self.got_value = got_value
+        self.expected_type = expected_type
+        super().__init__(msg=self.get_message())
 
-Since these resources have the same name, dbt will be unable to find the correct resource
+    def get_message(self) -> str:
+        got_type = type(self.got_value)
+        msg = (
+            f"'adapter.{self.method_name}' expects argument "
+            f"'{self.arg_name}' to be of type '{self.expected_type}', instead got "
+            f"{self.got_value} ({got_type})"
+        )
+        return msg
+
+
+class InvalidBoolean(CompilationException):
+    def __init__(self, return_value: Any, macro_name: str):
+        self.return_value = return_value
+        self.macro_name = macro_name
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        msg = (
+            f"Macro '{self.macro_name}' returns '{self.return_value}'. It is not type 'bool' "
+            "and cannot be converted reliably to a bool."
+        )
+        return msg
+
+
+class RefInvalidArgs(CompilationException):
+    def __init__(self, node, args):
+        self.node = node
+        self.args = args
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        msg = f"ref() takes at most two arguments ({len(self.args)} given)"
+        return msg
+
+
+class MetricInvalidArgs(CompilationException):
+    def __init__(self, node, args):
+        self.node = node
+        self.args = args
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        msg = f"metric() takes at most two arguments ({len(self.args)} given)"
+        return msg
+
+
+class RefBadContext(CompilationException):
+    def __init__(self, node, args):
+        self.node = node
+        self.args = args
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        # This explicitly references model['name'], instead of model['alias'], for
+        # better error messages. Ex. If models foo_users and bar_users are aliased
+        # to 'users', in their respective schemas, then you would want to see
+        # 'bar_users' in your error message instead of just 'users'.
+        if isinstance(self.node, dict):
+            model_name = self.node["name"]
+        else:
+            model_name = self.node.name
+
+        ref_args = ", ".join("'{}'".format(a) for a in self.args)
+        ref_string = f"{{{{ ref({ref_args}) }}}}"
+
+        msg = f"""dbt was unable to infer all dependencies for the model "{model_name}".
+This typically happens when ref() is placed within a conditional block.
+
+To fix this, add the following hint to the top of the model "{model_name}":
+
+-- depends_on: {ref_string}"""
+
+        return msg
+
+
+class InvalidDocArgs(CompilationException):
+    def __init__(self, node, args):
+        self.node = node
+        self.args = args
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        msg = f"doc() takes at most two arguments ({len(self.args)} given)"
+        return msg
+
+
+class DocTargetNotFound(CompilationException):
+    def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str]):
+        self.node = node
+        self.target_doc_name = target_doc_name
+        self.target_doc_package = target_doc_package
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        target_package_string = ""
+        if self.target_doc_package is not None:
+            target_package_string = f"in package '{self.
target_doc_package}' " + msg = f"Documentation for '{self.node.unique_id}' depends on doc '{self.target_doc_name}' {target_package_string} which was not found" + return msg + + +class MacroInvalidDispatchArg(CompilationException): + def __init__(self, macro_name: str): + self.macro_name = macro_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"""\ + The "packages" argument of adapter.dispatch() has been deprecated. + Use the "macro_namespace" argument instead. + + Raised during dispatch for: {self.macro_name} + + For more information, see: + + https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch + """ + return msg + + +class DuplicateMacroName(CompilationException): + def __init__(self, node_1, node_2, namespace: str): + self.node_1 = node_1 + self.node_2 = node_2 + self.namespace = namespace + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + duped_name = self.node_1.name + if self.node_1.package_name != self.node_2.package_name: + extra = f' ("{self.node_1.package_name}" and "{self.node_2.package_name}" are both in the "{self.namespace}" namespace)' + else: + extra = "" + + msg = ( + f'dbt found two macros with the name "{duped_name}" in the namespace "{self.namespace}"{extra}. ' + "Since these macros have the same name and exist in the same " + "namespace, dbt will be unable to decide which to call. To fix this, " + f"change the name of one of these macros:\n- {self.node_1.unique_id} " + f"({self.node_1.original_file_path})\n- {self.node_2.unique_id} ({self.node_2.original_file_path})" + ) + + return msg + + +# parser level exceptions +class InvalidDictParse(ParsingException): + def __init__(self, exc: ValidationError, node): + self.exc = exc + self.node = node + msg = self.validator_error_message(exc) + super().__init__(msg=msg) + + +class InvalidConfigUpdate(ParsingException): + def __init__(self, exc: ValidationError, node): + self.exc = exc + self.node = node + msg = self.validator_error_message(exc) + super().__init__(msg=msg) + + +class PythonParsingException(ParsingException): + def __init__(self, exc: SyntaxError, node): + self.exc = exc + self.node = node + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + validated_exc = self.validator_error_message(self.exc) + msg = f"{validated_exc}\n{self.exc.text}" + return msg + + +class PythonLiteralEval(ParsingException): + def __init__(self, exc: Exception, node): + self.exc = exc + self.node = node + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{self.exc}\n" + "https://docs.python.org/3/library/ast.html#ast.literal_eval\n" + "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures" + ) + + return msg + + +class InvalidModelConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class YamlParseListFailure(ParsingException): + def __init__( + self, + path: str, + key: str, + yaml_data: List, + cause, + ): + self.path = path + self.key = key + self.yaml_data = yaml_data + self.cause = cause + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + if isinstance(self.cause, str): + reason = self.cause + elif isinstance(self.cause, ValidationError): + reason = 
self.validator_error_message(self.cause) + else: + reason = self.cause.msg + msg = f"Invalid {self.key} config given in {self.path} @ {self.key}: {self.yaml_data} - {reason}" + return msg + + +class YamlParseDictFailure(ParsingException): + def __init__( + self, + path: str, + key: str, + yaml_data: Dict[str, Any], + cause, + ): + self.path = path + self.key = key + self.yaml_data = yaml_data + self.cause = cause + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + if isinstance(self.cause, str): + reason = self.cause + elif isinstance(self.cause, ValidationError): + reason = self.validator_error_message(self.cause) + else: + reason = self.cause.msg + msg = f"Invalid {self.key} config given in {self.path} @ {self.key}: {self.yaml_data} - {reason}" + return msg + + +class YamlLoadFailure(ParsingException): + def __init__(self, project_name: Optional[str], path: str, exc: ValidationException): + self.project_name = project_name + self.path = path + self.exc = exc + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + reason = self.validator_error_message(self.exc) + + msg = f"Error reading {self.project_name}: {self.path} - {reason}" + + return msg + + +class InvalidTestConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class InvalidSchemaConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class InvalidSnapshopConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class SameKeyNested(CompilationException): + def __init__(self): + msg = "Test cannot have the same key at the top-level and in config" + super().__init__(msg=msg) + + +class TestArgIncludesModel(CompilationException): + def __init__(self): + msg = 'Test arguments include "model", which is a reserved argument' + super().__init__(msg=msg) + + +class UnexpectedTestNamePattern(CompilationException): + def __init__(self, test_name: str): + self.test_name = test_name + msg = f"Test name string did not match expected pattern: {self.test_name}" + super().__init__(msg=msg) + + +class CustomMacroPopulatingConfigValues(CompilationException): + def __init__( + self, target_name: str, column_name: Optional[str], name: str, key: str, err_msg: str + ): + self.target_name = target_name + self.column_name = column_name + self.name = name + self.key = key + self.err_msg = err_msg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # Generic tests do not include custom macros in the Jinja + # rendering context, so this will almost always fail. As it + # currently stands, the error message is inscrutable, which + # has caused issues for some projects migrating from + # pre-0.20.0 to post-0.20.0. + # See https://github.com/dbt-labs/dbt-core/issues/4103 + # and https://github.com/dbt-labs/dbt-core/issues/5294 + + msg = ( + f"The {self.target_name}.{self.column_name} column's " + f'"{self.name}" test references an undefined ' + f"macro in its {self.key} configuration argument. 
" + f"The macro {self.err_msg}.\n" + "Please note that the generic test configuration parser " + "currently does not support using custom macros to " + "populate configuration values" + ) + return msg + + +class TagsNotListOfStrings(CompilationException): + def __init__(self, tags: Any): + self.tags = tags + msg = f"got {self.tags} ({type(self.tags)}) for tags, expected a list of strings" + super().__init__(msg=msg) + + +class TagNotString(CompilationException): + def __init__(self, tag: Any): + self.tag = tag + msg = f"got {self.tag} ({type(self.tag)}) for tag, expected a str" + super().__init__(msg=msg) + + +class TestNameNotString(ParsingException): + def __init__(self, test_name: Any): + self.test_name = test_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = f"test name must be a str, got {type(self.test_name)} (value {self.test_name})" + return msg + + +class TestArgsNotDict(ParsingException): + def __init__(self, test_args: Any): + self.test_args = test_args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = f"test arguments must be a dict, got {type(self.test_args)} (value {self.test_args})" + return msg + + +class TestDefinitionDictLength(ParsingException): + def __init__(self, test): + self.test = test + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + "test definition dictionary must have exactly one key, got" + f" {self.test} instead ({len(self.test)} keys)" + ) + return msg + + +class TestInvalidType(ParsingException): + def __init__(self, test: Any): + self.test = test + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"test must be dict or str, got {type(self.test)} (value {self.test})" + return msg + + +# This is triggered across multiple files +class EnvVarMissing(ParsingException): + def __init__(self, var: str): + self.var = var + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Env var required but not provided: '{self.var}'" + return msg + + +class TargetNotFound(CompilationException): + def __init__( + self, + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, + ): + self.node = node + self.target_name = target_name + self.target_kind = target_kind + self.target_package = target_package + self.disabled = disabled + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + original_file_path = self.node.original_file_path + unique_id = self.node.unique_id + resource_type_title = self.node.resource_type.title() + + if self.disabled is None: + reason = "was not found or is disabled" + elif self.disabled is True: + reason = "is disabled" + else: + reason = "was not found" + + target_package_string = "" + if self.target_package is not None: + target_package_string = f"in package '{self.target_package}' " + + msg = ( + f"{resource_type_title} '{unique_id}' ({original_file_path}) depends on a " + f"{self.target_kind} named '{self.target_name}' {target_package_string}which {reason}" + ) + return msg + + +class DuplicateSourcePatchName(CompilationException): + def __init__(self, patch_1, patch_2): + self.patch_1 = patch_1 + self.patch_2 = patch_2 + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + name = f"{self.patch_1.overrides}.{self.patch_1.name}" + fix = self._fix_dupe_msg( + self.patch_1.path, + self.patch_2.path, + name, + "sources", + ) + msg = ( + f"dbt found two 
schema.yml entries for the same source named " + f"{self.patch_1.name} in package {self.patch_1.overrides}. Sources may only be " + f"overridden a single time. To fix this, {fix}" + ) + return msg + + +class DuplicateMacroPatchName(CompilationException): + def __init__(self, patch_1, existing_patch_path): + self.patch_1 = patch_1 + self.existing_patch_path = existing_patch_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + package_name = self.patch_1.package_name + name = self.patch_1.name + fix = self._fix_dupe_msg( + self.patch_1.original_file_path, self.existing_patch_path, name, "macros" + ) + msg = ( + f"dbt found two schema.yml entries for the same macro in package " + f"{package_name} named {name}. Macros may only be described a single " + f"time. To fix this, {fix}" + ) + return msg + + +# core level exceptions +class DuplicateAlias(AliasException): + def __init__(self, kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str): + self.kwargs = kwargs + self.aliases = aliases + self.canonical_key = canonical_key + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # dupe found: go through the dict so we can have a nice-ish error + key_names = ", ".join( + "{}".format(k) for k in self.kwargs if self.aliases.get(k) == self.canonical_key + ) + msg = f'Got duplicate keys: ({key_names}) all map to "{self.canonical_key}"' + return msg + + +# Postgres Exceptions + + +class UnexpectedDbReference(NotImplementedException): + def __init__(self, adapter, database, expected): + self.adapter = adapter + self.database = database + self.expected = expected + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Cross-db references not allowed in {self.adapter} ({self.database} vs {self.expected})" + return msg + + +class CrossDbReferenceProhibited(CompilationException): + def __init__(self, adapter, exc_msg: str): + self.adapter = adapter + self.exc_msg = exc_msg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Cross-db references not allowed in adapter {self.adapter}: Got {self.exc_msg}" + return msg + + +class IndexConfigNotDict(CompilationException): + def __init__(self, raw_index: Any): + self.raw_index = raw_index + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Invalid index config:\n" + f" Got: {self.raw_index}\n" + f' Expected a dictionary with at minimum a "columns" key' + ) + return msg + + +class InvalidIndexConfig(CompilationException): + def __init__(self, exc: TypeError): + self.exc = exc + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + validator_msg = self.validator_error_message(self.exc) + msg = f"Could not parse index config: {validator_msg}" + return msg + + +# adapters exceptions +class InvalidMacroResult(CompilationException): + def __init__(self, freshness_macro_name: str, table): + self.freshness_macro_name = freshness_macro_name + self.table = table + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f'Got an invalid result from "{self.freshness_macro_name}" macro: {[tuple(r) for r in self.table]}' + + return msg + + +class SnapshotTargetNotSnapshotTable(CompilationException): + def __init__(self, missing: List): + self.missing = missing + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = 'Snapshot target is not a snapshot table (missing "{}")'.format( + '", "'.join(self.missing) + ) + return msg 
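
Each class in this refactor follows the same pattern: the constructor stores its inputs as attributes and passes a formatted message to the base exception, so callers can branch on the exception type and read structured fields instead of parsing message strings. A minimal sketch of that handling style, assuming `PackageNotFoundForMacro` is importable from `dbt.exceptions` as defined earlier in this diff; the resolver and package names below are made-up examples:

    from dbt.exceptions import PackageNotFoundForMacro

    def resolve_macro_package(package_name: str) -> str:
        # Hypothetical resolver; dbt raises the class the same way internally.
        known = {"dbt", "dbt_utils"}
        if package_name not in known:
            raise PackageNotFoundForMacro(package_name)
        return package_name

    try:
        resolve_macro_package("my_missing_package")
    except PackageNotFoundForMacro as exc:
        # Structured attribute access; no parsing of the message string needed.
        print(f"missing package: {exc.package_name}")
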
+ + +class SnapshotTargetIncomplete(CompilationException): + def __init__(self, extra: List, missing: List): + self.extra = extra + self.missing = missing + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + 'Snapshot target has ("{}") but not ("{}") - is it an ' + "unmigrated previous version archive?".format( + '", "'.join(self.extra), '", "'.join(self.missing) + ) + ) + return msg + + +class RenameToNoneAttempted(CompilationException): + def __init__(self, src_name: str, dst_name: str, name: str): + self.src_name = src_name + self.dst_name = dst_name + self.name = name + self.msg = f"Attempted to rename {self.src_name} to {self.dst_name} for {self.name}" + super().__init__(msg=self.msg) + + +class NullRelationDropAttempted(CompilationException): + def __init__(self, name: str): + self.name = name + self.msg = f"Attempted to drop a null relation for {self.name}" + super().__init__(msg=self.msg) + + +class NullRelationCacheAttempted(CompilationException): + def __init__(self, name: str): + self.name = name + self.msg = f"Attempted to cache a null relation for {self.name}" + super().__init__(msg=self.msg) + + +class InvalidQuoteConfigType(CompilationException): + def __init__(self, quote_config: Any): + self.quote_config = quote_config + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + 'The seed configuration value of "quote_columns" has an ' + f"invalid type {type(self.quote_config)}" + ) + return msg + + +class MultipleDatabasesNotAllowed(CompilationException): + def __init__(self, databases): + self.databases = databases + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = str(self.databases) + return msg + + +class RelationTypeNull(CompilationException): + def __init__(self, relation): + self.relation = relation + self.msg = f"Tried to drop relation {self.relation}, but its type is null." + super().__init__(msg=self.msg) + + +class MaterializationNotAvailable(CompilationException): + def __init__(self, model, adapter_type: str): + self.model = model + self.adapter_type = adapter_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + materialization = self.model.get_materialization() + msg = f"Materialization '{materialization}' is not available for {self.adapter_type}!" + return msg + + +class RelationReturnedMultipleResults(CompilationException): + def __init__(self, kwargs: Mapping[str, Any], matches: List): + self.kwargs = kwargs + self.matches = matches + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "get_relation returned more than one relation with the given args. " + "Please specify a database or schema to narrow down the result set." + f"\n{self.kwargs}\n\n{self.matches}" + ) + return msg + + +class ApproximateMatch(CompilationException): + def __init__(self, target, relation): + self.target = target + self.relation = relation + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + "When searching for a relation, dbt found an approximate match. " + "Instead of guessing \nwhich relation to use, dbt will move on. " + f"Please delete {self.relation}, or rename it to be less ambiguous." 
+ f"\nSearched for: {self.target}\nFound: {self.relation}" + ) + + return msg + + +# adapters exceptions +class UnexpectedNull(DatabaseException): + def __init__(self, field_name: str, source): + self.field_name = field_name + self.source = source + msg = ( + f"Expected a non-null value when querying field '{self.field_name}' of table " + f" {self.source} but received value 'null' instead" + ) + super().__init__(msg) + + +class UnexpectedNonTimestamp(DatabaseException): + def __init__(self, field_name: str, source, dt: Any): + self.field_name = field_name + self.source = source + self.type_name = type(dt).__name__ + msg = ( + f"Expected a timestamp value when querying field '{self.field_name}' of table " + f"{self.source} but received value of type '{self.type_name}' instead" + ) + super().__init__(msg) + + +# deps exceptions +class MultipleVersionGitDeps(DependencyException): + def __init__(self, git: str, requested): + self.git = git + self.requested = requested + msg = ( + "git dependencies should contain exactly one version. " + f"{self.git} contains: {self.requested}" + ) + super().__init__(msg) + + +class DuplicateProjectDependency(DependencyException): + def __init__(self, project_name: str): + self.project_name = project_name + msg = ( + f'Found duplicate project "{self.project_name}". This occurs when ' + "a dependency has the same project name as some other dependency." + ) + super().__init__(msg) + + +class DuplicateDependencyToRoot(DependencyException): + def __init__(self, project_name: str): + self.project_name = project_name + msg = ( + "Found a dependency with the same name as the root project " + f'"{self.project_name}". Package names must be unique in a project.' + " Please rename one of these packages." + ) + super().__init__(msg) + + +class MismatchedDependencyTypes(DependencyException): + def __init__(self, new, old): + self.new = new + self.old = old + msg = ( + f"Cannot incorporate {self.new} ({self.new.__class__.__name__}) in {self.old} " + f"({self.old.__class__.__name__}): mismatched types" + ) + super().__init__(msg) + + +class PackageVersionNotFound(DependencyException): + def __init__( + self, + package_name: str, + version_range, + available_versions: List[str], + should_version_check: bool, + ): + self.package_name = package_name + self.version_range = version_range + self.available_versions = available_versions + self.should_version_check = should_version_check + super().__init__(self.get_message()) + + def get_message(self) -> str: + base_msg = ( + "Could not find a matching compatible version for package {}\n" + " Requested range: {}\n" + " Compatible versions: {}\n" + ) + addendum = ( + ( + "\n" + " Not shown: package versions incompatible with installed version of dbt-core\n" + " To include them, run 'dbt --no-version-check deps'" + ) + if self.should_version_check + else "" + ) + msg = ( + base_msg.format(self.package_name, self.version_range, self.available_versions) + + addendum + ) + return msg + + +class PackageNotFound(DependencyException): + def __init__(self, package_name: str): + self.package_name = package_name + msg = f"Package {self.package_name} was not found in the package index" + super().__init__(msg) + + +# config level exceptions + + +class ProfileConfigInvalid(DbtProfileError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ProjectContractInvalid(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = 
self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ProjectContractBroken(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ConfigContractBroken(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class NonUniquePackageName(CompilationException): + def __init__(self, project_name: str): + self.project_name = project_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "dbt found more than one package with the name " + f'"{self.project_name}" included in this project. Package ' + "names must be unique in a project. Please rename " + "one of these packages." + ) + return msg + + +class UninstalledPackagesFound(CompilationException): + def __init__( + self, + count_packages_specified: int, + count_packages_installed: int, + packages_install_path: str, + ): + self.count_packages_specified = count_packages_specified + self.count_packages_installed = count_packages_installed + self.packages_install_path = packages_install_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"dbt found {self.count_packages_specified} package(s) " + "specified in packages.yml, but only " + f"{self.count_packages_installed} package(s) installed " + f'in {self.packages_install_path}. Run "dbt deps" to ' + "install package dependencies." + ) + return msg + + +class VarsArgNotYamlDict(CompilationException): + def __init__(self, var_type): + self.var_type = var_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + type_name = self.var_type.__name__ + + msg = f"The --vars argument must be a YAML dictionary, but was of type '{type_name}'" + return msg + + +# contracts level + + +class DuplicateMacroInPackage(CompilationException): + def __init__(self, macro, macro_mapping: Mapping): + self.macro = macro + self.macro_mapping = macro_mapping + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + other_path = self.macro_mapping[self.macro.unique_id].original_file_path + # subtract 2 for the "Compilation Error" indent + # note that the line wrap eats newlines, so if you want newlines, + # this is the result :( + msg = line_wrap_message( + f"""\ + dbt found two macros named "{self.macro.name}" in the project + "{self.macro.package_name}". + + + To fix this error, rename or remove one of the following + macros: + + - {self.macro.original_file_path} + + - {other_path} + """, + subtract=2, + ) + return msg + + +class DuplicateMaterializationName(CompilationException): + def __init__(self, macro, other_macro): + self.macro = macro + self.other_macro = other_macro + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + macro_name = self.macro.name + macro_package_name = self.macro.package_name + other_package_name = self.other_macro.macro.package_name + + msg = ( + f"Found two materializations with the name {macro_name} (packages " + f"{macro_package_name} and {other_package_name}). dbt cannot resolve " + "this ambiguity" + ) + return msg + + +# jinja exceptions +class MissingConfig(CompilationException): + def __init__(self, unique_id: str, name: str): + self.unique_id = unique_id + self.name = name + msg = ( + f"Model '{self.unique_id}' does not define a required config parameter '{self.name}'." 
+ ) + super().__init__(msg=msg) + + +class MissingMaterialization(CompilationException): + def __init__(self, model, adapter_type): + self.model = model + self.adapter_type = adapter_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + materialization = self.model.get_materialization() + + valid_types = "'default'" + + if self.adapter_type != "default": + valid_types = f"'default' and '{self.adapter_type}'" + + msg = f"No materialization '{materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" + return msg + + +class MissingRelation(CompilationException): + def __init__(self, relation, model=None): + self.relation = relation + self.model = model + msg = f"Relation {self.relation} not found!" + super().__init__(msg=msg) + + +class AmbiguousAlias(CompilationException): + def __init__(self, node_1, node_2, duped_name=None): + self.node_1 = node_1 + self.node_2 = node_2 + if duped_name is None: + self.duped_name = f"{self.node_1.database}.{self.node_1.schema}.{self.node_1.alias}" + else: + self.duped_name = duped_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + f'dbt found two resources with the database representation "{self.duped_name}".\ndbt ' + "cannot create two resources with identical database representations. " + "To fix this,\nchange the configuration of one of these resources:" + f"\n- {self.node_1.unique_id} ({self.node_1.original_file_path})\n- {self.node_2.unique_id} ({self.node_2.original_file_path})" + ) + return msg + + +class AmbiguousCatalogMatch(CompilationException): + def __init__(self, unique_id: str, match_1, match_2): + self.unique_id = unique_id + self.match_1 = match_1 + self.match_2 = match_2 + super().__init__(msg=self.get_message()) + + def get_match_string(self, match): + match_schema = match.get("metadata", {}).get("schema") + match_name = match.get("metadata", {}).get("name") + return f"{match_schema}.{match_name}" + + def get_message(self) -> str: + msg = ( + "dbt found two relations in your warehouse with similar database identifiers. " + "dbt\nis unable to determine which of these relations was created by the model " + f'"{self.unique_id}".\nIn order for dbt to correctly generate the catalog, one ' + "of the following relations must be deleted or renamed:\n\n - " + f"{self.get_match_string(self.match_1)}\n - {self.get_match_string(self.match_2)}" + ) + + return msg + + +class CacheInconsistency(InternalException): + def __init__(self, msg: str): + self.msg = msg + formatted_msg = f"Cache inconsistency detected: {self.msg}" + super().__init__(msg=formatted_msg) + + +class NewNameAlreadyInCache(CacheInconsistency): + def __init__(self, old_key: str, new_key: str): + self.old_key = old_key + self.new_key = new_key + msg = ( + f'in rename of "{self.old_key}" -> "{self.new_key}", new name is in the cache already' + ) + super().__init__(msg) + + +class ReferencedLinkNotCached(CacheInconsistency): + def __init__(self, referenced_key: str): + self.referenced_key = referenced_key + msg = f"in add_link, referenced link key {self.referenced_key} not in cache!" + super().__init__(msg) + + +class DependentLinkNotCached(CacheInconsistency): + def __init__(self, dependent_key: str): + self.dependent_key = dependent_key + msg = f"in add_link, dependent link key {self.dependent_key} not in cache!" 
+        super().__init__(msg)
+
+
+class TruncatedModelNameCausedCollision(CacheInconsistency):
+    def __init__(self, new_key, relations: Dict):
+        self.new_key = new_key
+        self.relations = relations
+        super().__init__(self.get_message())
+
+    def get_message(self) -> str:
+        # Tell user when collision caused by model names truncated during
+        # materialization.
+        match = re.search("__dbt_backup|__dbt_tmp$", self.new_key.identifier)
+        if match:
+            truncated_model_name_prefix = self.new_key.identifier[: match.start()]
+            message_addendum = (
+                "\n\nName collisions can occur when the lengths of two "
+                "models' names approach your database's built-in limit. "
+                "Try restructuring your project such that no two models "
+                f"share the prefix '{truncated_model_name_prefix}'. "
+                "Then, clean your warehouse of any removed models."
+            )
+        else:
+            message_addendum = ""
+
+        msg = f"in rename, new key {self.new_key} already in cache: {list(self.relations.keys())}{message_addendum}"
+
+        return msg
+
+
+class NoneRelationFound(CacheInconsistency):
+    def __init__(self):
+        msg = "in get_relations, a None relation was found in the cache!"
+        super().__init__(msg)
+
+
+# this is part of the context and also raised in dbt.contracts.relation.py
+class DataclassNotDict(CompilationException):
+    def __init__(self, obj: Any):
+        self.obj = obj
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        msg = (
+            f'The object ("{self.obj}") was used as a dictionary. This '
+            "capability has been removed from objects of this type."
+        )
+
+        return msg
+
+
+class DependencyNotFound(CompilationException):
+    def __init__(self, node, node_description, required_pkg):
+        self.node = node
+        self.node_description = node_description
+        self.required_pkg = required_pkg
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        msg = (
+            f"Error while parsing {self.node_description}.\nThe required package "
+            f'"{self.required_pkg}" was not found. Is the package installed?\n'
+            "Hint: You may need to run `dbt deps`."
+        )
+
+        return msg
+
+
+class DuplicatePatchPath(CompilationException):
+    def __init__(self, patch_1, existing_patch_path):
+        self.patch_1 = patch_1
+        self.existing_patch_path = existing_patch_path
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        name = self.patch_1.name
+        fix = self._fix_dupe_msg(
+            self.patch_1.original_file_path,
+            self.existing_patch_path,
+            name,
+            "resource",
+        )
+        msg = (
+            f"dbt found two schema.yml entries for the same resource named "
+            f"{name}. Resources and their associated columns may only be "
+            f"described a single time. To fix this, {fix}"
+        )
+        return msg
+
+
+# should this inherit ParsingException instead?
+class DuplicateResourceName(CompilationException):
+    def __init__(self, node_1, node_2):
+        self.node_1 = node_1
+        self.node_2 = node_2
+        super().__init__(msg=self.get_message())
+
+    def get_message(self) -> str:
+        duped_name = self.node_1.name
+        node_type = NodeType(self.node_1.resource_type)
+        pluralized = (
+            node_type.pluralize()
+            if self.node_1.resource_type == self.node_2.resource_type
+            else "resources"  # still raise if ref() collision, e.g. 
model + seed + ) + + action = "looking for" + # duplicate 'ref' targets + if node_type in NodeType.refable(): + formatted_name = f'ref("{duped_name}")' + # duplicate sources + elif node_type == NodeType.Source: + duped_name = self.node_1.get_full_source_name() + formatted_name = self.node_1.get_source_representation() + # duplicate docs blocks + elif node_type == NodeType.Documentation: + formatted_name = f'doc("{duped_name}")' + # duplicate generic tests + elif node_type == NodeType.Test and hasattr(self.node_1, "test_metadata"): + column_name = ( + f'column "{self.node_1.column_name}" in ' if self.node_1.column_name else "" + ) + model_name = self.node_1.file_key_name + duped_name = f'{self.node_1.name}" defined on {column_name}"{model_name}' + action = "running" + formatted_name = "tests" + # all other resource types + else: + formatted_name = duped_name + + msg = f""" +dbt found two {pluralized} with the name "{duped_name}". + +Since these resources have the same name, dbt will be unable to find the correct resource when {action} {formatted_name}. To fix this, change the name of one of these resources: -- {node_1.unique_id} ({node_1.original_file_path}) -- {node_2.unique_id} ({node_2.original_file_path}) +- {self.node_1.unique_id} ({self.node_1.original_file_path}) +- {self.node_2.unique_id} ({self.node_2.original_file_path}) """.strip() - ) + return msg -def raise_ambiguous_alias(node_1, node_2, duped_name=None): - if duped_name is None: - duped_name = f"{node_1.database}.{node_1.schema}.{node_1.alias}" - - raise_compiler_error( - 'dbt found two resources with the database representation "{}".\ndbt ' - "cannot create two resources with identical database representations. " - "To fix this,\nchange the configuration of one of these resources:" - "\n- {} ({})\n- {} ({})".format( - duped_name, - node_1.unique_id, - node_1.original_file_path, - node_2.unique_id, - node_2.original_file_path, +class InvalidPropertyYML(CompilationException): + def __init__(self, path: str, issue: str): + self.path = path + self.issue = issue + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"The yml property file at {self.path} is invalid because {self.issue}. " + "Please consult the documentation for more information on yml property file " + "syntax:\n\nhttps://docs.getdbt.com/reference/configs-and-properties" ) - ) + return msg -def raise_ambiguous_catalog_match(unique_id, match_1, match_2): - def get_match_string(match): - return "{}.{}".format( - match.get("metadata", {}).get("schema"), - match.get("metadata", {}).get("name"), - ) +class PropertyYMLMissingVersion(InvalidPropertyYML): + def __init__(self, path: str): + self.path = path + self.issue = f"the yml property file {self.path} is missing a version tag" + super().__init__(self.path, self.issue) - raise_compiler_error( - "dbt found two relations in your warehouse with similar database " - "identifiers. dbt\nis unable to determine which of these relations " - 'was created by the model "{unique_id}".\nIn order for dbt to ' - "correctly generate the catalog, one of the following relations must " - "be deleted or renamed:\n\n - {match_1_s}\n - {match_2_s}".format( - unique_id=unique_id, - match_1_s=get_match_string(match_1), - match_2_s=get_match_string(match_2), + +class PropertyYMLVersionNotInt(InvalidPropertyYML): + def __init__(self, path: str, version: Any): + self.path = path + self.version = version + self.issue = ( + "its 'version:' tag must be an integer (e.g. version: 2)." 
+ f" {self.version} is not an integer" ) - ) + super().__init__(self.path, self.issue) -def raise_patch_targets_not_found(patches): - patch_list = "\n\t".join( - "model {} (referenced in path {})".format(p.name, p.original_file_path) - for p in patches.values() - ) - raise_compiler_error( - "dbt could not find models for the following patches:\n\t{}".format(patch_list) - ) +class PropertyYMLInvalidTag(InvalidPropertyYML): + def __init__(self, path: str, version: int): + self.path = path + self.version = version + self.issue = f"its 'version:' tag is set to {self.version}. Only 2 is supported" + super().__init__(self.path, self.issue) -def _fix_dupe_msg(path_1: str, path_2: str, name: str, type_name: str) -> str: - if path_1 == path_2: - return f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" - else: - return ( - f"remove the {type_name} entry for {name} in one of these files:\n" - f" - {path_1!s}\n{path_2!s}" +class RelationWrongType(CompilationException): + def __init__(self, relation, expected_type, model=None): + self.relation = relation + self.expected_type = expected_type + self.model = model + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Trying to create {self.expected_type} {self.relation}, " + f"but it currently exists as a {self.relation.type}. Either " + f"drop {self.relation} manually, or run dbt with " + "`--full-refresh` and dbt will drop it for you." ) + return msg + -def raise_duplicate_patch_name(patch_1, existing_patch_path): - name = patch_1.name - fix = _fix_dupe_msg( - patch_1.original_file_path, - existing_patch_path, - name, - "resource", - ) - raise_compiler_error( - f"dbt found two schema.yml entries for the same resource named " - f"{name}. Resources and their associated columns may only be " - f"described a single time. To fix this, {fix}" - ) +# These are copies of what's in dbt/context/exceptions_jinja.py to not immediately break adapters +# utilizing these functions as exceptions. These are direct copies to avoid circular imports. +# They will be removed in 1 (or 2?) versions. Issue to be created to ensure it happens. + +# TODO: add deprecation to functions +def warn(msg, node=None): + warn_or_error(JinjaLogWarning(msg=msg, node_info=get_node_info())) + return "" -def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): - package_name = patch_1.package_name - name = patch_1.name - fix = _fix_dupe_msg(patch_1.original_file_path, existing_patch_path, name, "macros") - raise_compiler_error( - f"dbt found two schema.yml entries for the same macro in package " - f"{package_name} named {name}. Macros may only be described a single " - f"time. 
To fix this, {fix}" - ) +def missing_config(model, name) -> NoReturn: + raise MissingConfig(unique_id=model.unique_id, name=name) + + +def missing_materialization(model, adapter_type) -> NoReturn: + raise MissingMaterialization(model=model, adapter_type=adapter_type) + + +def missing_relation(relation, model=None) -> NoReturn: + raise MissingRelation(relation, model) + + +def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: + raise AmbiguousAlias(node_1, node_2, duped_name) + + +def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: + raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + + +def raise_cache_inconsistent(message) -> NoReturn: + raise CacheInconsistency(message) + + +def raise_dataclass_not_dict(obj) -> NoReturn: + raise DataclassNotDict(obj) + + +# note: this is called all over the code in addition to in jinja +def raise_compiler_error(msg, node=None) -> NoReturn: + raise CompilationException(msg, node) + + +def raise_database_error(msg, node=None) -> NoReturn: + raise DatabaseException(msg, node) + + +def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: + raise DependencyNotFound(node, node_description, required_pkg) + + +def raise_dependency_error(msg) -> NoReturn: + raise DependencyException(scrub_secrets(msg, env_secrets())) + + +def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: + raise DuplicatePatchPath(patch_1, existing_patch_path) + + +def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: + raise DuplicateResourceName(node_1, node_2) + + +def raise_invalid_property_yml_version(path, issue) -> NoReturn: + raise InvalidPropertyYML(path, issue) + + +def raise_not_implemented(msg) -> NoReturn: + raise NotImplementedException(msg) + + +def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: + raise RelationWrongType(relation, expected_type, model) + + +# these were implemented in core so deprecating here by calling the new exception directly +def raise_duplicate_alias( + kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str +) -> NoReturn: + raise DuplicateAlias(kwargs, aliases, canonical_key) def raise_duplicate_source_patch_name(patch_1, patch_2): - name = f"{patch_1.overrides}.{patch_1.name}" - fix = _fix_dupe_msg( - patch_1.path, - patch_2.path, - name, - "sources", - ) - raise_compiler_error( - f"dbt found two schema.yml entries for the same source named " - f"{patch_1.name} in package {patch_1.overrides}. Sources may only be " - f"overridden a single time. To fix this, {fix}" - ) + raise DuplicateSourcePatchName(patch_1, patch_2) + + +def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): + raise DuplicateMacroPatchName(patch_1, existing_patch_path) + + +def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: + raise DuplicateMacroName(node_1, node_2, namespace) + + +def approximate_relation_match(target, relation): + raise ApproximateMatch(target, relation) -def raise_invalid_property_yml_version(path, issue): - raise_compiler_error( - "The yml property file at {} is invalid because {}. 
Please consult the " - "documentation for more information on yml property file syntax:\n\n" - "https://docs.getdbt.com/reference/configs-and-properties".format(path, issue) +def get_relation_returned_multiple_results(kwargs, matches): + raise RelationReturnedMultipleResults(kwargs, matches) + + +def system_error(operation_name): + # Note: This was converted for core to use SymbolicLinkError because it's the only way it was used. Maintaining flexibility here for now. + msg = ( + f"dbt encountered an error when attempting to {operation_name}. " + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" ) + raise CompilationException(msg) -def raise_unrecognized_credentials_type(typename, supported_types): - raise_compiler_error( - 'Unrecognized credentials type "{}" - supported types are ({})'.format( - typename, ", ".join('"{}"'.format(t) for t in supported_types) - ) +def invalid_materialization_argument(name, argument): + raise InvalidMaterializationArg(name, argument) + + +def bad_package_spec(repo, spec, error_message): + msg = f"Error checking out spec='{spec}' for repo {repo}\n{error_message}" + raise InternalException(scrub_secrets(msg, env_secrets())) + + +def raise_git_cloning_error(error: CommandResultError) -> NoReturn: + error.cmd = list(scrub_secrets(str(error.cmd), env_secrets())) + raise error + + +def raise_git_cloning_problem(repo) -> NoReturn: + raise GitCloningProblem(repo) + + +def macro_invalid_dispatch_arg(macro_name) -> NoReturn: + raise MacroInvalidDispatchArg(macro_name) + + +def dependency_not_found(node, dependency): + raise GraphDependencyNotFound(node, dependency) + + +def target_not_found( + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, +) -> NoReturn: + raise TargetNotFound( + node=node, + target_name=target_name, + target_kind=target_kind, + target_package=target_package, + disabled=disabled, ) -def warn_invalid_patch(patch, resource_type): - msg = line_wrap_message( - f"""\ - '{patch.name}' is a {resource_type} node, but it is - specified in the {patch.yaml_key} section of - {patch.original_file_path}. - To fix this error, place the `{patch.name}` - specification under the {resource_type.pluralize()} key instead. 
-        """
-    )
+def doc_target_not_found(
+    model, target_doc_name: str, target_doc_package: Optional[str]
+) -> NoReturn:
+    raise DocTargetNotFound(
+        node=model, target_doc_name=target_doc_name, target_doc_package=target_doc_package
+    )
-    warn_or_error(msg, log_fmt=warning_tag("{}"))
-def raise_not_implemented(msg):
-    raise NotImplementedException("ERROR: {}".format(msg))
+def doc_invalid_args(model, args) -> NoReturn:
+    raise InvalidDocArgs(node=model, args=args)
+
+def ref_bad_context(model, args) -> NoReturn:
+    raise RefBadContext(node=model, args=args)
+
+
+def metric_invalid_args(model, args) -> NoReturn:
+    raise MetricInvalidArgs(node=model, args=args)
-def raise_duplicate_alias(
-    kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str
-) -> NoReturn:
-    # dupe found: go through the dict so we can have a nice-ish error
-    key_names = ", ".join("{}".format(k) for k in kwargs if aliases.get(k) == canonical_key)
-    raise AliasException(f'Got duplicate keys: ({key_names}) all map to "{canonical_key}"')
+def ref_invalid_args(model, args) -> NoReturn:
+    raise RefInvalidArgs(node=model, args=args)
-def warn_or_error(msg, node=None, log_fmt=None):
-    if flags.WARN_ERROR:
-        raise_compiler_error(scrub_secrets(msg, env_secrets()), node)
-    else:
-        fire_event(GeneralWarningMsg(msg=msg, log_fmt=log_fmt))
+def invalid_bool_error(got_value, macro_name) -> NoReturn:
+    raise InvalidBoolean(return_value=got_value, macro_name=macro_name)
-def warn_or_raise(exc, log_fmt=None):
-    if flags.WARN_ERROR:
-        raise exc
-    else:
-        fire_event(GeneralWarningException(exc=str(exc), log_fmt=log_fmt))
+def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn:
+    """Raise a CompilationException when an adapter method available to macros
+    has changed.
+    """
+    raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type)
-def warn(msg, node=None):
-    # there's no reason to expose log_fmt to macros - it's only useful for
-    # handling colors
-    warn_or_error(msg, node=node)
-    return ""
+def disallow_secret_env_var(env_var_name) -> NoReturn:
+    """Raise an error when a secret env var is referenced outside allowed
+    rendering contexts"""
+    raise DisallowSecretEnvVar(env_var_name)
+
+
+def raise_parsing_error(msg, node=None) -> NoReturn:
+    raise ParsingException(msg, node)
+
+
+# These are the exception functions that were not called within dbt-core; they remain here, deprecated, to give adapters a chance to rework their usage
+# TODO: is this valid? Should I create a special exception class for this?
+def raise_unrecognized_credentials_type(typename, supported_types):
+    msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format(
+        typename, ", ".join('"{}"'.format(t) for t in supported_types)
+    )
+    raise CompilationException(msg)
+
+
+def raise_patch_targets_not_found(patches):
+    patch_list = "\n\t".join(
+        f"model {p.name} (referenced in path {p.original_file_path})" for p in patches.values()
+    )
+    msg = f"dbt could not find models for the following patches:\n\t{patch_list}"
+    raise CompilationException(msg)
+
+
+def multiple_matching_relations(kwargs, matches):
+    raise RelationReturnedMultipleResults(kwargs, matches)
+
+
+# while this isn't in our code I wouldn't be surprised if it's in adapter code
+def materialization_not_available(model, adapter_type):
+    raise MaterializationNotAvailable(model, adapter_type)
+
+
+def macro_not_found(model, target_macro_id):
+    msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!"
+ raise CompilationException(msg=msg, node=model) -# Update this when a new function should be added to the -# dbt context's `exceptions` key! -CONTEXT_EXPORTS = { - fn.__name__: fn - for fn in [ - warn, - missing_config, - missing_materialization, - missing_relation, - raise_ambiguous_alias, - raise_ambiguous_catalog_match, - raise_cache_inconsistent, - raise_dataclass_not_dict, - raise_compiler_error, - raise_database_error, - raise_dep_not_found, - raise_dependency_error, - raise_duplicate_patch_name, - raise_duplicate_resource_name, - raise_invalid_property_yml_version, - raise_not_implemented, - relation_wrong_type, - ] -} - - -def wrapper(model): - def wrap(func): - @functools.wraps(func) - def inner(*args, **kwargs): - try: - return func(*args, **kwargs) - except RuntimeException as exc: - exc.add_node(model) - raise exc - - return inner - - return wrap - - -def wrapped_exports(model): - wrap = wrapper(model) - return {name: wrap(export) for name, export in CONTEXT_EXPORTS.items()} +# adapters use this to format messages. it should be deprecated but live on for now +def validator_error_message(exc): + """Given a dbt.dataclass_schema.ValidationError (which is basically a + jsonschema.ValidationError), return the relevant parts as a string + """ + if not isinstance(exc, dbt.dataclass_schema.ValidationError): + return str(exc) + path = "[%s]" % "][".join(map(repr, exc.relative_path)) + return "at path {}: {}".format(path, exc.message) diff --git a/core/dbt/flags.py b/core/dbt/flags.py index bff51c2b343..14e60c834c6 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -52,7 +52,6 @@ "PRINTER_WIDTH", "PROFILES_DIR", "INDIRECT_SELECTION", - "EVENT_BUFFER_SIZE", "TARGET_PATH", "LOG_PATH", ] @@ -73,11 +72,11 @@ "LOG_CACHE_EVENTS": False, "LOG_FORMAT": None, "LOG_PATH": None, + "QUIET": False, "NO_PRINT": False, "PARTIAL_PARSE": True, "PRINTER_WIDTH": 80, "PROFILES_DIR": DEFAULT_PROFILES_DIR, - "QUIET": False, "SEND_ANONYMOUS_USAGE_STATS": True, "STATIC_PARSER": True, "TARGET_PATH": None, @@ -115,6 +114,7 @@ def env_set_path(key: str) -> Optional[Path]: MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING") DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE") +FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE_STATE") ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH") ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER") @@ -135,7 +135,7 @@ def set_from_args(args, user_config): global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, ANONYMOUS_USAGE_STATS - global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET, NO_PRINT, CACHE_SELECTED_ONLY + global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, QUIET, NO_PRINT, CACHE_SELECTED_ONLY global TARGET_PATH, LOG_PATH STRICT_MODE = False # backwards compatibility @@ -148,7 +148,6 @@ def set_from_args(args, user_config): ANONYMOUS_USAGE_STATS = get_flag_value("ANONYMOUS_USAGE_STATS", args, user_config) CACHE_SELECTED_ONLY = get_flag_value("CACHE_SELECTED_ONLY", args, user_config) DEBUG = get_flag_value("DEBUG", args, user_config) - EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config) FAIL_FAST = get_flag_value("FAIL_FAST", args, user_config) INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config) LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config) @@ 
-186,7 +185,7 @@ def _set_overrides_from_env(): def get_flag_value(flag, args, user_config): flag_value = _load_flag_value(flag, args, user_config) - if flag in ["PRINTER_WIDTH", "EVENT_BUFFER_SIZE"]: # must be ints + if flag == "PRINTER_WIDTH": # must be ints flag_value = int(flag_value) if flag == "PROFILES_DIR": flag_value = os.path.abspath(flag_value) @@ -248,7 +247,6 @@ def get_flag_dict(): "printer_width": PRINTER_WIDTH, "indirect_selection": INDIRECT_SELECTION, "log_cache_events": LOG_CACHE_EVENTS, - "event_buffer_size": EVENT_BUFFER_SIZE, "quiet": QUIET, "no_print": NO_PRINT, } diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py index 56248409754..3c3b9625d27 100644 --- a/core/dbt/graph/queue.py +++ b/core/dbt/graph/queue.py @@ -5,8 +5,12 @@ from typing import Dict, Set, List, Generator, Optional from .graph import UniqueId -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure, ParsedMetric -from dbt.contracts.graph.compiled import GraphMemberNode +from dbt.contracts.graph.nodes import ( + SourceDefinition, + Exposure, + Metric, + GraphMemberNode, +) from dbt.contracts.graph.manifest import Manifest from dbt.node_types import NodeType @@ -48,7 +52,7 @@ def _include_in_cost(self, node_id: UniqueId) -> bool: if node.resource_type != NodeType.Model: return False # must be a Model - tell mypy this won't be a Source or Exposure or Metric - assert not isinstance(node, (ParsedSourceDefinition, ParsedExposure, ParsedMetric)) + assert not isinstance(node, (SourceDefinition, Exposure, Metric)) if node.is_ephemeral: return False return True diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 49b73fc71c4..ed91596712b 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -5,15 +5,14 @@ from .selector_methods import MethodManager from .selector_spec import SelectionCriteria, SelectionSpec, IndirectSelection -from dbt.events.functions import fire_event -from dbt.events.types import SelectorReportInvalidSelector +from dbt.events.functions import fire_event, warn_or_error +from dbt.events.types import SelectorReportInvalidSelector, NoNodesForSelectionCriteria from dbt.node_types import NodeType from dbt.exceptions import ( InternalException, InvalidSelectorException, - warn_or_error, ) -from dbt.contracts.graph.compiled import GraphMemberNode +from dbt.contracts.graph.nodes import GraphMemberNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.state import PreviousState @@ -24,11 +23,6 @@ def get_package_names(nodes): return set([node.split(".")[1] for node in nodes]) -def alert_non_existence(raw_spec, nodes): - if len(nodes) == 0: - warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match any nodes") - - def can_select_indirectly(node): """If a node is not selected itself, but its parent(s) are, it may qualify for indirect selection. @@ -142,8 +136,8 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId], direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes) - if spec.expect_exists: - alert_non_existence(spec.raw, direct_nodes) + if spec.expect_exists and len(direct_nodes) == 0: + warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw))) return direct_nodes, indirect_nodes @@ -223,7 +217,7 @@ def expand_selection( if can_select_indirectly(node): # should we add it in directly? 
if indirect_selection == IndirectSelection.Eager or set( - node.depends_on.nodes + node.depends_on_nodes ) <= set(selected): direct_nodes.add(unique_id) # if not: @@ -247,7 +241,7 @@ def incorporate_indirect_nodes( for unique_id in indirect_nodes: if unique_id in self.manifest.nodes: node = self.manifest.nodes[unique_id] - if set(node.depends_on.nodes) <= set(selected): + if set(node.depends_on_nodes) <= set(selected): selected.add(unique_id) return selected diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index 577cf825512..c77625649bc 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -7,20 +7,15 @@ from .graph import UniqueId -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompileResultNode, - ManifestNode, -) from dbt.contracts.graph.manifest import Manifest, WritableManifest -from dbt.contracts.graph.parsed import ( - HasTestMetadata, - ParsedSingularTestNode, - ParsedExposure, - ParsedMetric, - ParsedGenericTestNode, - ParsedSourceDefinition, +from dbt.contracts.graph.nodes import ( + SingularTestNode, + Exposure, + Metric, + GenericTestNode, + SourceDefinition, + ResultNode, + ManifestNode, ) from dbt.contracts.state import PreviousState from dbt.exceptions import ( @@ -76,7 +71,7 @@ def is_selected_node(fqn: List[str], node_selector: str): return True -SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric] +SelectorTarget = Union[SourceDefinition, ManifestNode, Exposure, Metric] class SelectorMethod(metaclass=abc.ABCMeta): @@ -99,7 +94,7 @@ def parsed_nodes( def source_nodes( self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedSourceDefinition]]: + ) -> Iterator[Tuple[UniqueId, SourceDefinition]]: for key, source in self.manifest.sources.items(): unique_id = UniqueId(key) @@ -107,9 +102,7 @@ def source_nodes( continue yield unique_id, source - def exposure_nodes( - self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedExposure]]: + def exposure_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Exposure]]: for key, exposure in self.manifest.exposures.items(): unique_id = UniqueId(key) @@ -117,9 +110,7 @@ def exposure_nodes( continue yield unique_id, exposure - def metric_nodes( - self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedMetric]]: + def metric_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Metric]]: for key, metric in self.manifest.metrics.items(): unique_id = UniqueId(key) @@ -139,13 +130,13 @@ def all_nodes( def configurable_nodes( self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, CompileResultNode]]: + ) -> Iterator[Tuple[UniqueId, ResultNode]]: yield from chain(self.parsed_nodes(included_nodes), self.source_nodes(included_nodes)) def non_source_nodes( self, included_nodes: Set[UniqueId], - ) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric]]]: + ) -> Iterator[Tuple[UniqueId, Union[Exposure, ManifestNode, Metric]]]: yield from chain( self.parsed_nodes(included_nodes), self.exposure_nodes(included_nodes), @@ -286,8 +277,6 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu root = Path.cwd() paths = set(p.relative_to(root) for p in root.glob(selector)) for node, real_node in self.all_nodes(included_nodes): - if Path(real_node.root_path) != root: - continue ofp = Path(real_node.original_file_path) if ofp in paths: yield node 
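
The selector.py hunk above also shows the new warning pattern used throughout this patch: instead of passing a pre-formatted string to the old `dbt.exceptions.warn_or_error`, callers fire a typed event through `dbt.events.functions.warn_or_error`, which decides centrally whether to log a warning or raise (mirroring the `flags.WARN_ERROR` check in the removed helper). A minimal sketch of the call, using the imports from the hunk; the selector string is a made-up example:

    from dbt.events.functions import warn_or_error
    from dbt.events.types import NoNodesForSelectionCriteria

    # Equivalent of the removed alert_non_existence() helper: emit a typed
    # event rather than formatting the warning text at the call site.
    warn_or_error(NoNodesForSelectionCriteria(spec_raw="tag:nightly"))
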
@@ -387,26 +376,26 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu class TestNameSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: for node, real_node in self.parsed_nodes(included_nodes): - if isinstance(real_node, HasTestMetadata): - if real_node.test_metadata.name == selector: + if real_node.resource_type == NodeType.Test and hasattr(real_node, "test_metadata"): + if real_node.test_metadata.name == selector: # type: ignore[union-attr] yield node class TestTypeSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: - search_types: Tuple[Type, ...] + search_type: Type # continue supporting 'schema' + 'data' for backwards compatibility if selector in ("generic", "schema"): - search_types = (ParsedGenericTestNode, CompiledGenericTestNode) + search_type = GenericTestNode elif selector in ("singular", "data"): - search_types = (ParsedSingularTestNode, CompiledSingularTestNode) + search_type = SingularTestNode else: raise RuntimeException( f'Invalid test type selector {selector}: expected "generic" or ' '"singular"' ) for node, real_node in self.parsed_nodes(included_nodes): - if isinstance(real_node, search_types): + if isinstance(real_node, search_type): yield node @@ -438,6 +427,9 @@ def _macros_modified(self) -> List[str]: return modified def recursively_check_macros_modified(self, node, visited_macros): + if not hasattr(node, "depends_on"): + return False + for macro_uid in node.depends_on.macros: if macro_uid in visited_macros: continue diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py index eec26a20c64..a8ff90fa75f 100644 --- a/core/dbt/helper_types.py +++ b/core/dbt/helper_types.py @@ -3,7 +3,7 @@ # necessary for annotating constructors from __future__ import annotations -from dataclasses import dataclass +from dataclasses import dataclass, field from datetime import timedelta from pathlib import Path from typing import Tuple, AbstractSet, Union @@ -85,7 +85,7 @@ def __eq__(self, other): class NoValue(dbtClassMixin): """Sometimes, you want a way to say none that isn't None""" - novalue: NVEnum = NVEnum.novalue + novalue: NVEnum = field(default_factory=lambda: NVEnum.novalue) dbtClassMixin.register_field_encoders( diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql index 602067616d2..e8ff5c1ea4f 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql @@ -50,9 +50,9 @@ {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#} {% set incremental_strategy = config.get('incremental_strategy') or 'default' %} - {% set incremental_predicates = config.get('incremental_predicates', none) %} + {% set incremental_predicates = config.get('predicates', none) or config.get('incremental_predicates', none) %} {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %} - {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %} + {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 
diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql
index 602067616d2..e8ff5c1ea4f 100644
--- a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql
+++ b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql
@@ -50,9 +50,9 @@

     {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}
     {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}
-    {% set incremental_predicates = config.get('incremental_predicates', none) %}
+    {% set incremental_predicates = config.get('predicates', none) or config.get('incremental_predicates', none) %}
     {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}
-    {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %}
+    {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'incremental_predicates': incremental_predicates }) %}
     {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}

   {% endif %}
diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql
index 836d768d01a..5033178be49 100644
--- a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql
+++ b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql
@@ -1,9 +1,9 @@
-{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}
-  {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}
+{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
+  {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }}
 {%- endmacro %}

-{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}
-    {%- set predicates = [] if predicates is none else [] + predicates -%}
+{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
+    {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%}
     {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
     {%- set merge_update_columns = config.get('merge_update_columns') -%}
     {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}
@@ -32,7 +32,7 @@
     merge into {{ target }} as DBT_INTERNAL_DEST
         using {{ source }} as DBT_INTERNAL_SOURCE
-        on {{ predicates | join(' and ') }}
+        on {{"(" ~ predicates | join(") and (") ~ ")"}}

     {% if unique_key %}
     when matched then update set
@@ -50,11 +50,11 @@
 {% endmacro %}

-{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}
-  {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}
+{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
+  {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }}
 {%- endmacro %}

-{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}
+{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}

     {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}

@@ -65,8 +65,13 @@
             where (
                 {% for key in unique_key %}
                     {{ source }}.{{ key }} = {{ target }}.{{ key }}
-                    {{ "and " if not loop.last }}
+                    {{ "and " if not loop.last}}
                 {% endfor %}
+                {% if incremental_predicates %}
+                    {% for predicate in incremental_predicates %}
+                        and {{ predicate }}
+                    {% endfor %}
+                {% endif %}
             );
         {% else %}
             delete from {{ target }}
@@ -74,7 +79,12 @@
                 {{ unique_key }}) in (
                 select ({{ unique_key }})
                 from {{ source }}
-            );
+            )
+            {%- if incremental_predicates %}
+                {% for predicate in incremental_predicates %}
+                    and {{ predicate }}
+                {% endfor %}
+            {%- endif -%};
         {% endif %}
     {% endif %}
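The ON-clause construction in default__get_merge_sql above (each predicate parenthesized, then joined with "and") behaves like this Python sketch; the sample predicates are made up for illustration:

    def build_on_clause(predicates):
        # parenthesize every predicate so a user-supplied condition
        # cannot change operator precedence once the pieces are AND-ed
        return "(" + ") and (".join(predicates) + ")"

    predicates = [
        "DBT_INTERNAL_SOURCE.id = DBT_INTERNAL_DEST.id",   # unique-key match
        "DBT_INTERNAL_DEST.ds >= '2022-10-01'",            # incremental predicate
    ]
    print("on " + build_on_clause(predicates))
    # on (DBT_INTERNAL_SOURCE.id = DBT_INTERNAL_DEST.id) and (DBT_INTERNAL_DEST.ds >= '2022-10-01')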
diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql
index 5226d01de16..72082ccad32 100644
--- a/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql
+++ b/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql
@@ -21,7 +21,7 @@

 {% macro default__get_incremental_delete_insert_sql(arg_dict) %}

-  {% do return(get_delete_insert_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"])) %}
+  {% do return(get_delete_insert_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %}

 {% endmacro %}

@@ -35,7 +35,7 @@

 {% macro default__get_incremental_merge_sql(arg_dict) %}

-  {% do return(get_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"])) %}
+  {% do return(get_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %}

 {% endmacro %}

@@ -48,7 +48,7 @@

 {% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}

-  {% do return(get_insert_overwrite_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["dest_columns"], arg_dict["predicates"])) %}
+  {% do return(get_insert_overwrite_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %}

 {% endmacro %}
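The strategy macros above share one calling convention: a single arg_dict is handed to whichever strategy macro the adapter resolves, so a new key such as 'incremental_predicates' can be threaded through without changing every macro signature. A hedged Python analogue (names are illustrative, not dbt's API):

    def default_get_incremental_merge_sql(arg_dict, get_merge_sql):
        # unpack the shared argument dict; extra keys simply ride along,
        # which is what lets new options reach the merge macros without
        # breaking existing adapter overrides
        return get_merge_sql(
            arg_dict["target_relation"],
            arg_dict["temp_relation"],
            arg_dict["unique_key"],
            arg_dict["dest_columns"],
            arg_dict["incremental_predicates"],
        )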
diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql
index 2155662987e..c56ff7f31c8 100644
--- a/core/dbt/include/global_project/macros/python_model/python.sql
+++ b/core/dbt/include/global_project/macros/python_model/python.sql
@@ -30,12 +30,13 @@ def source(*args, dbt_load_df_function):

 {% macro build_config_dict(model) %}
     {%- set config_dict = {} -%}
-    {%- for key in model.config.config_keys_used -%}
+    {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %}
+    {%- for key, default in config_dbt_used -%}
         {# weird type testing with enum, would be much easier to write this logic in Python! #}
         {%- if key == 'language' -%}
             {%- set value = 'python' -%}
         {%- endif -%}
-        {%- set value = model.config[key] -%}
+        {%- set value = model.config.get(key, default) -%}
         {%- do config_dict.update({key: value}) -%}
     {%- endfor -%}
     config_dict = {{ config_dict }}
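The build_config_dict change above zips each config key a model used with its recorded default, then falls back to that default when the model did not set the key explicitly. Roughly equivalent Python (the sample data is hypothetical):

    config_keys_used = ["materialized", "incremental_strategy", "language"]
    config_keys_defaults = ["view", None, None]
    model_config = {"materialized": "incremental", "language": "python"}

    config_dict = {}
    for key, default in zip(config_keys_used, config_keys_defaults):
        # the macro special-cases 'language' for python models; mirrored here
        value = "python" if key == "language" else model_config.get(key, default)
        config_dict[key] = value

    print(config_dict)
    # {'materialized': 'incremental', 'incremental_strategy': None, 'language': 'python'}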
diff --git a/core/dbt/include/index.html b/core/dbt/include/index.html
index 182b6b49f99..65749e446d0 100644
--- a/core/dbt/include/index.html
+++ b/core/dbt/include/index.html
@@ -90,7 +90,7 @@
[hunk body elided: the change falls inside the single, extremely long line of minified, bundled JavaScript (cytoscape.js and related vendored libraries) that makes up the generated docs page; the hunk is also truncated mid-stream in the source, so the minified content is not reproducible here.]
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var 
o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var h=null,g=e.length/1,m=!s,v=0;v=g||!i.boundingBoxInBoundingBox(h.bb,b.boundingBox()))&&!(h=p({insert:!0,after:h})))return null;c||m?o.queueLayer(h,b):o.drawEleInLayer(h,b,n,t),h.eles.push(b),x[n]=h}}return c||(m?null:f)},c.getEleLevelForLayerLevel=function(e,t){return e},c.drawEleInLayer=function(e,t,n,r){var i=this.renderer,o=e.context,a=t.boundingBox();if(0!==a.w&&0!==a.h&&t.visible()){var s=this.eleTxrCache,l=s.reasons.highQuality;n=this.getEleLevelForLayerLevel(n,r);var c=s.getElement(t,a,null,n,l);c?(f(o,!1),o.drawImage(c.texture.canvas,c.x,0,c.width,c.height,a.x1,a.y1,a.w,a.h),f(o,!0)):i.drawElement(o,t)}},c.levelIsComplete=function(e,t){var n=this.layersByLevel[e];if(!n||0===n.length)return!1;for(var r=0,i=0;i0)return!1;if(o.invalid)return!1;r+=o.eles.length}return r===t.length},c.validateLayersElesOrdering=function(e,t){var n=this.layersByLevel[e];if(n)for(var r=0;r0){e=!0;break}}return e},c.invalidateElements=function(e){var 
t=this;t.lastInvalidationTime=r.performanceNow(),0!==e.length&&t.haveLayers()&&t.updateElementsInLayers(e,(function(e,n,r){t.invalidateLayer(e)}))},c.invalidateLayer=function(e){if(this.lastInvalidationTime=r.performanceNow(),!e.invalid){var t=e.level,n=e.eles,i=this.layersByLevel[t];r.removeFromArray(i,e),e.elesQueue=[],e.invalid=!0,e.replacement&&(e.replacement.invalid=!0);for(var o=0;o0&&void 0!==arguments[0]?arguments[0]:f;e.lineWidth=h,e.lineCap="butt",i.strokeStyle(e,d[0],d[1],d[2],n),i.drawEdgePath(t,e,o.allpts,p)},m=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:f;i.drawArrowheads(e,t,n)};if(e.lineJoin="round","yes"===t.pstyle("ghost").value){var v=t.pstyle("ghost-offset-x").pfValue,b=t.pstyle("ghost-offset-y").pfValue,y=t.pstyle("ghost-opacity").value,x=f*y;e.translate(v,b),g(x),m(x),e.translate(-v,-b)}g(),m(),function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c;e.lineWidth=l,"self"!==o.edgeType||a?e.lineCap="round":e.lineCap="butt",i.strokeStyle(e,u[0],u[1],u[2],n),i.drawEdgePath(t,e,o.allpts,"solid")}(),i.drawElementText(e,t,r),n&&e.translate(s.x1,s.y1)}},drawEdgePath:function(e,t,n,r){var i=e._private.rscratch,o=t,a=void 0,s=!1,l=this.usePaths();if(l){var c=n.join("$");i.pathCacheKey&&i.pathCacheKey===c?(a=t=i.pathCache,s=!0):(a=t=new Path2D,i.pathCacheKey=c,i.pathCache=a)}if(o.setLineDash)switch(r){case"dotted":o.setLineDash([1,1]);break;case"dashed":o.setLineDash([6,3]);break;case"solid":o.setLineDash([])}if(!s&&!i.badLine)switch(t.beginPath&&t.beginPath(),t.moveTo(n[0],n[1]),i.edgeType){case"bezier":case"self":case"compound":case"multibezier":if(e.hasClass("horizontal")){var u=n[4],d=n[5],f=(n[0]+n[4])/2;t.lineTo(n[0]+10,n[1]),t.bezierCurveTo(f,n[1],f,n[5],n[4]-10,n[5]),t.lineTo(u,d)}else if(e.hasClass("vertical")){var p=n[4],h=n[5],g=(n[1]+n[5])/2;t.bezierCurveTo(n[0],g,n[4],g,n[4],n[5]-10),t.lineTo(p,h)}else for(var m=2;m+30||j>0&&O>0){var P=f-T;switch(k){case"left":P-=m;break;case"center":P-=m/2}var D=p-v-T,R=m+2*T,I=v+2*T;if(_>0){var N=e.fillStyle,M=t.pstyle("text-background-color").value;e.fillStyle="rgba("+M[0]+","+M[1]+","+M[2]+","+_*o+")","roundrectangle"==t.pstyle("text-background-shape").strValue?(s=P,l=D,c=R,u=I,d=(d=2)||5,(a=e).beginPath(),a.moveTo(s+d,l),a.lineTo(s+c-d,l),a.quadraticCurveTo(s+c,l,s+c,l+d),a.lineTo(s+c,l+u-d),a.quadraticCurveTo(s+c,l+u,s+c-d,l+u),a.lineTo(s+d,l+u),a.quadraticCurveTo(s,l+u,s,l+u-d),a.lineTo(s,l+d),a.quadraticCurveTo(s,l,s+d,l),a.closePath(),a.fill()):e.fillRect(P,D,R,I),e.fillStyle=N}if(j>0&&O>0){var z=e.strokeStyle,L=e.lineWidth,B=t.pstyle("text-border-color").value,F=t.pstyle("text-border-style").value;if(e.strokeStyle="rgba("+B[0]+","+B[1]+","+B[2]+","+O*o+")",e.lineWidth=j,e.setLineDash)switch(F){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"double":e.lineWidth=j/4,e.setLineDash([]);break;case"solid":e.setLineDash([])}if(e.strokeRect(P,D,R,I),"double"===F){var q=j/2;e.strokeRect(P+q,D+q,R-2*q,I-2*q)}e.setLineDash&&e.setLineDash([]),e.lineWidth=L,e.strokeStyle=z}}var V=2*t.pstyle("text-outline-width").pfValue;if(V>0&&(e.lineWidth=V),"wrap"===t.pstyle("text-wrap").value){var U=r.getPrefixedProperty(i,"labelWrapCachedLines",n),H=v/U.length;switch(A){case"top":p-=(U.length-1)*H;break;case"center":case"bottom":p-=(U.length-1)*H}for(var G=0;G0&&e.strokeText(U[G],f,p),e.fillText(U[G],f,p),p+=H}else V>0&&e.strokeText(h,f,p),e.fillText(h,f,p);0!==E&&(e.rotate(-E),e.translate(-$,-C))}}},e.exports=o},function(e,t,n){"use strict";var 
r=n(0),i={drawNode:function(e,t,n,i){var o,a,s=this,l=t._private,c=l.rscratch,u=t.position();if(r.number(u.x)&&r.number(u.y)&&t.visible()){var d=t.effectiveOpacity(),f=s.usePaths(),p=void 0,h=!1,g=t.padding();o=t.width()+2*g,a=t.height()+2*g;var m=void 0;n&&(m=n,e.translate(-m.x1,-m.y1));for(var v=t.pstyle("background-image").value,b=new Array(v.length),y=new Array(v.length),x=0,w=0;w0&&void 0!==arguments[0]?arguments[0]:C;s.fillStyle(e,$[0],$[1],$[2],t)},P=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:j;s.strokeStyle(e,_[0],_[1],_[2],t)},D=t.pstyle("shape").strValue,R=t.pstyle("shape-polygon-points").pfValue;if(f){var I=D+"$"+o+"$"+a+("polygon"===D?"$"+R.join("$"):"");e.translate(u.x,u.y),c.pathCacheKey===I?(p=c.pathCache,h=!0):(p=new Path2D,c.pathCacheKey=I,c.pathCache=p)}var N,M,z,L=function(){if(!h){var n=u;f&&(n={x:0,y:0}),s.nodeShapes[s.getNodeShape(t)].draw(p||e,n.x,n.y,o,a)}f?e.fill(p):e.fill()},B=function(){for(var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,r=l.backgrounding,i=0,o=0;o0&&void 0!==arguments[0]&&arguments[0],r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:d;s.hasPie(t)&&(s.drawPie(e,t,r),n&&(f||s.nodeShapes[s.getNodeShape(t)].draw(e,u.x,u.y,o,a)))},q=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,n=(E>0?E:-E)*t,r=E>0?0:255;0!==E&&(s.fillStyle(e,r,r,r,n),f?e.fill(p):e.fill())},V=function(){if(S>0){if(e.lineWidth=S,e.lineCap="butt",e.setLineDash)switch(O){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"solid":case"double":e.setLineDash([])}if(f?e.stroke(p):e.stroke(),"double"===O){e.lineWidth=S/3;var t=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",f?e.stroke(p):e.stroke(),e.globalCompositeOperation=t}e.setLineDash&&e.setLineDash([])}};if("yes"===t.pstyle("ghost").value){var U=t.pstyle("ghost-offset-x").pfValue,H=t.pstyle("ghost-offset-y").pfValue,G=t.pstyle("ghost-opacity").value,W=G*d;e.translate(U,H),T(G*C),L(),B(W),F(0!==E||0!==S),q(W),P(G*j),V(),e.translate(-U,-H)}T(),L(),B(),F(0!==E||0!==S),q(),P(),V(),f&&e.translate(-u.x,-u.y),s.drawElementText(e,t,i),N=t.pstyle("overlay-padding").pfValue,M=t.pstyle("overlay-opacity").value,z=t.pstyle("overlay-color").value,M>0&&(s.fillStyle(e,z[0],z[1],z[2],M),s.nodeShapes.roundrectangle.draw(e,u.x,u.y,o+2*N,a+2*N),e.fill()),n&&e.translate(m.x1,m.y1)}},hasPie:function(e){return(e=e[0])._private.hasPie},drawPie:function(e,t,n,r){t=t[0],r=r||t.position();var i=t.cy().style(),o=t.pstyle("pie-size"),a=r.x,s=r.y,l=t.width(),c=t.height(),u=Math.min(l,c)/2,d=0;this.usePaths()&&(a=0,s=0),"%"===o.units?u*=o.pfValue:void 0!==o.pfValue&&(u=o.pfValue/2);for(var f=1;f<=i.pieBackgroundN;f++){var p=t.pstyle("pie-"+f+"-background-size").value,h=t.pstyle("pie-"+f+"-background-color").value,g=t.pstyle("pie-"+f+"-background-opacity").value*n,m=p/100;m+d>1&&(m=1-d);var v=1.5*Math.PI+2*Math.PI*d,b=v+2*Math.PI*m;0===p||d>=1||d+m>1||(e.beginPath(),e.moveTo(a,s),e.arc(a,s,u,v,b),e.closePath(),this.fillStyle(e,h[0],h[1],h[2],g),e.fill(),d+=m)}}};e.exports=i},function(e,t,n){"use strict";var r={},i=n(1);r.getPixelRatio=function(){var e=this.data.contexts[0];if(null!=this.forcedPixelRatio)return this.forcedPixelRatio;var t=e.backingStorePixelRatio||e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return(window.devicePixelRatio||1)/t},r.paintCache=function(e){for(var 
t,n=this.paintCaches=this.paintCaches||[],r=!0,i=0;is.minMbLowQualFrames&&(s.motionBlurPxRatio=s.mbPxRBlurry)),s.clearingMotionBlur&&(s.motionBlurPxRatio=1),s.textureDrawLastFrame&&!f&&(d[s.NODE]=!0,d[s.SELECT_BOX]=!0);var y=c.style()._private.coreStyle,x=c.zoom(),w=void 0!==o?o:x,k=c.pan(),A={x:k.x,y:k.y},E={zoom:x,pan:{x:k.x,y:k.y}},S=s.prevViewport;void 0===S||E.zoom!==S.zoom||E.pan.x!==S.pan.x||E.pan.y!==S.pan.y||m&&!g||(s.motionBlurPxRatio=1),a&&(A=a),w*=l,A.x*=l,A.y*=l;var $=s.getCachedZSortedEles();function C(e,t,n,r,i){var o=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",s.fillStyle(e,255,255,255,s.motionBlurTransparency),e.fillRect(t,n,r,i),e.globalCompositeOperation=o}function _(e,r){var i,l,c,d;s.clearingMotionBlur||e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]&&e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]?(i=A,l=w,c=s.canvasWidth,d=s.canvasHeight):(i={x:k.x*h,y:k.y*h},l=x*h,c=s.canvasWidth*h,d=s.canvasHeight*h),e.setTransform(1,0,0,1,0,0),"motionBlur"===r?C(e,0,0,c,d):t||void 0!==r&&!r||e.clearRect(0,0,c,d),n||(e.translate(i.x,i.y),e.scale(l,l)),a&&e.translate(a.x,a.y),o&&e.scale(o,o)}if(f||(s.textureDrawLastFrame=!1),f){if(s.textureDrawLastFrame=!0,!s.textureCache){s.textureCache={},s.textureCache.bb=c.mutableElements().boundingBox(),s.textureCache.texture=s.data.bufferCanvases[s.TEXTURE_BUFFER];var O=s.data.bufferContexts[s.TEXTURE_BUFFER];O.setTransform(1,0,0,1,0,0),O.clearRect(0,0,s.canvasWidth*s.textureMult,s.canvasHeight*s.textureMult),s.render({forcedContext:O,drawOnlyNodeLayer:!0,forcedPxRatio:l*s.textureMult}),(E=s.textureCache.viewport={zoom:c.zoom(),pan:c.pan(),width:s.canvasWidth,height:s.canvasHeight}).mpan={x:(0-E.pan.x)/E.zoom,y:(0-E.pan.y)/E.zoom}}d[s.DRAG]=!1,d[s.NODE]=!1;var j=u.contexts[s.NODE],T=s.textureCache.texture;E=s.textureCache.viewport,s.textureCache.bb,j.setTransform(1,0,0,1,0,0),p?C(j,0,0,E.width,E.height):j.clearRect(0,0,E.width,E.height);var P=y["outside-texture-bg-color"].value,D=y["outside-texture-bg-opacity"].value;s.fillStyle(j,P[0],P[1],P[2],D),j.fillRect(0,0,E.width,E.height),x=c.zoom(),_(j,!1),j.clearRect(E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l),j.drawImage(T,E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l)}else s.textureOnViewport&&!t&&(s.textureCache=null);var R=c.extent(),I=s.pinching||s.hoverData.dragging||s.swipePanning||s.data.wheelZooming||s.hoverData.draggingEles,N=s.hideEdgesOnViewport&&I,M=[];if(M[s.NODE]=!d[s.NODE]&&p&&!s.clearedForMotionBlur[s.NODE]||s.clearingMotionBlur,M[s.NODE]&&(s.clearedForMotionBlur[s.NODE]=!0),M[s.DRAG]=!d[s.DRAG]&&p&&!s.clearedForMotionBlur[s.DRAG]||s.clearingMotionBlur,M[s.DRAG]&&(s.clearedForMotionBlur[s.DRAG]=!0),d[s.NODE]||n||r||M[s.NODE]){var z=p&&!M[s.NODE]&&1!==h;_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]:u.contexts[s.NODE]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.nondrag,l,R):s.drawLayeredElements(j,$.nondrag,l,R),s.debug&&s.drawDebugPoints(j,$.nondrag),n||p||(d[s.NODE]=!1)}if(!r&&(d[s.DRAG]||n||M[s.DRAG])&&(z=p&&!M[s.DRAG]&&1!==h,_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]:u.contexts[s.DRAG]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.drag,l,R):s.drawCachedElements(j,$.drag,l,R),s.debug&&s.drawDebugPoints(j,$.drag),n||p||(d[s.DRAG]=!1)),s.showFps||!r&&d[s.SELECT_BOX]&&!n){if(_(j=t||u.contexts[s.SELECT_BOX]),1==s.selection[4]&&(s.hoverData.selecting||s.touchData.selecting)){x=s.cy.zoom();var 
L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
e;this.length=0},s=a.prototype;s.instanceString=function(){return"stylesheet"},s.selector=function(e){return this[this.length++]={selector:e,properties:[]},this},s.css=function(e,t){var n=this.length-1;if(r.string(e))this[n].properties.push({name:e,value:t});else if(r.plainObject(e))for(var a=e,s=0;s=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(239),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(35))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,i,o,a,s,l=1,c={},u=!1,d=e.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(e);f=f&&f.setTimeout?f:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((o=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){o.port2.postMessage(e)}):d&&"onreadystatechange"in d.createElement("script")?(i=d.documentElement,r=function(e){var t=d.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,i.removeChild(t),t=null},i.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",s=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",s,!1):e.attachEvent("onmessage",s),r=function(t){e.postMessage(a+t,"*")}),f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n=t||n<0||m&&e-c>=o}function w(){var e=p();if(x(e))return k(e);s=setTimeout(w,function(e){var n=t-(e-l);return m?f(n,o-(e-c)):n}(e))}function k(e){return s=void 0,v&&r?b(e):(r=i=void 0,a)}function A(){var e=p(),n=x(e);if(r=arguments,i=this,l=e,n){if(void 0===s)return y(l);if(m)return s=setTimeout(w,t),b(l)}return void 0===s&&(s=setTimeout(w,t)),a}return t=g(t)||0,h(n)&&(u=!!n.leading,o=(m="maxWait"in n)?d(g(n.maxWait)||0,t):o,v="trailing"in n?!!n.trailing:v),A.cancel=function(){void 0!==s&&clearTimeout(s),c=0,r=l=i=s=void 0},A.flush=function(){return void 0===s?a:k(p())},A}}).call(this,n(35))},function(e,t,n){e.exports=n(243)},function(e,t,n){var r,i,o;(function(){var n,a,s,l,c,u,d,f,p,h,g,m,v,b,y;s=Math.floor,h=Math.min,a=function(e,t){return et?1:0},p=function(e,t,n,r,i){var o;if(null==n&&(n=0),null==i&&(i=a),n<0)throw new Error("lo must be non-negative");for(null==r&&(r=e.length);nn;0<=n?t++:t--)c.push(t);return c}.apply(this).reverse()).length;rg;0<=g?++u:--u)m.push(c(e,n));return m},b=function(e,t,n,r){var i,o,s;for(null==r&&(r=a),i=e[n];n>t&&r(i,o=e[s=n-1>>1])<0;)e[n]=o,n=s;return e[n]=i},y=function(e,t,n){var r,i,o,s,l;for(null==n&&(n=a),i=e.length,l=t,o=e[t],r=2*t+1;r'+e.content+"":s+=">"+e.content+"";var l=t(s);return l.data("selector",e.selector),l.data("on-click-function",e.onClickFunction),l.data("show",void 0===e.show||e.show),l}function y(){var e;l("active")&&(e=s.children(),t(e).each((function(){x(t(this))})),i.off("tapstart",n),s.remove(),c(s=void 0,void 0),c("active",!1),c("anyVisibleChild",!1))}function x(e){var n="string"==typeof 
e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new 
r,this.size=0}},function(e,t){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},function(e,t){e.exports=function(e){return this.__data__.get(e)}},function(e,t){e.exports=function(e){return this.__data__.has(e)}},function(e,t,n){var r=n(74),i=n(117),o=n(118);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new o(a)}return n.set(e,t),this.size=n.size,this}},function(e,t,n){var r=n(64),i=n(262),o=n(23),a=n(151),s=/^\[object .+?Constructor\]$/,l=Function.prototype,c=Object.prototype,u=l.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=function(e){return!(!o(e)||i(e))&&(r(e)?f:s).test(a(e))}},function(e,t,n){var r=n(58),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i}},function(e,t){var n=Object.prototype.toString;e.exports=function(e){return n.call(e)}},function(e,t,n){var r,i=n(263),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";e.exports=function(e){return!!o&&o in e}},function(e,t,n){var r=n(29)["__core-js_shared__"];e.exports=r},function(e,t){e.exports=function(e,t){return null==e?void 0:e[t]}},function(e,t,n){var r=n(266),i=n(74),o=n(117);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(e,t,n){var r=n(267),i=n(268),o=n(269),a=n(270),s=n(271);function l(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}},function(e,t,n){var r=n(173),i=n(340),o=n(344),a=n(174),s=n(345),l=n(129);e.exports=function(e,t,n){var c=-1,u=i,d=e.length,f=!0,p=[],h=p;if(n)f=!1,u=o;else if(d>=200){var g=t?null:s(e);if(g)return l(g);f=!1,u=a,h=new r}else h=t?[]:p;e:for(;++c-1}},function(e,t,n){var r=n(188),i=n(342),o=n(343);e.exports=function(e,t,n){return t==t?o(e,t,n):r(e,i,n)}},function(e,t){e.exports=function(e){return e!=e}},function(e,t){e.exports=function(e,t,n){for(var r=n-1,i=e.length;++r1||1===t.length&&e.hasEdge(t[0],t[0])}))}},function(e,t,n){var r=n(22);e.exports=function(e,t,n){return function(e,t,n){var r={},i=e.nodes();return i.forEach((function(e){r[e]={},r[e][e]={distance:0},i.forEach((function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})})),n(e).forEach((function(n){var i=n.v===e?n.w:n.v,o=t(n);r[e][i]={distance:o,predecessor:e}}))})),i.forEach((function(e){var t=r[e];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[e],i=t[n],a=o[n],s=r.distance+i.distance;s0;){if(n=l.removeMin(),r.has(s,n))a.setEdge(n,s[n]);else{if(u)throw new Error("Input graph is not connected: "+e);u=!0}e.nodeEdges(n).forEach(c)}return a}},function(e,t,n){"use strict";var r=n(11),i=n(399),o=n(402),a=n(403),s=n(20).normalizeRanks,l=n(405),c=n(20).removeEmptyRanks,u=n(406),d=n(407),f=n(408),p=n(409),h=n(418),g=n(20),m=n(28).Graph;e.exports=function(e,t){var n=t&&t.debugTiming?g.time:g.notime;n("layout",(function(){var t=n(" buildLayoutGraph",(function(){return function(e){var t=new m({multigraph:!0,compound:!0}),n=$(e.graph());return t.setGraph(r.merge({},b,S(n,v),r.pick(n,y))),r.forEach(e.nodes(),(function(n){var 
i=$(e.node(n));t.setNode(n,r.defaults(S(i,x),w)),t.setParent(n,e.parent(n))})),r.forEach(e.edges(),(function(n){var i=$(e.edge(n));t.setEdge(n,r.merge({},A,S(i,k),r.pick(i,E)))})),t}(e)}));n(" runLayout",(function(){!function(e,t){t(" makeSpaceForEdgeLabels",(function(){!function(e){var t=e.graph();t.ranksep/=2,r.forEach(e.edges(),(function(n){var r=e.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===t.rankdir||"BT"===t.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(e)})),t(" removeSelfEdges",(function(){!function(e){r.forEach(e.edges(),(function(t){if(t.v===t.w){var n=e.node(t.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:t,label:e.edge(t)}),e.removeEdge(t)}}))}(e)})),t(" acyclic",(function(){i.run(e)})),t(" nestingGraph.run",(function(){u.run(e)})),t(" rank",(function(){a(g.asNonCompoundGraph(e))})),t(" injectEdgeLabelProxies",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(n.width&&n.height){var r=e.node(t.v),i={rank:(e.node(t.w).rank-r.rank)/2+r.rank,e:t};g.addDummyNode(e,"edge-proxy",i,"_ep")}}))}(e)})),t(" removeEmptyRanks",(function(){c(e)})),t(" nestingGraph.cleanup",(function(){u.cleanup(e)})),t(" normalizeRanks",(function(){s(e)})),t(" assignRankMinMax",(function(){!function(e){var t=0;r.forEach(e.nodes(),(function(n){var i=e.node(n);i.borderTop&&(i.minRank=e.node(i.borderTop).rank,i.maxRank=e.node(i.borderBottom).rank,t=r.max(t,i.maxRank))})),e.graph().maxRank=t}(e)})),t(" removeEdgeLabelProxies",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);"edge-proxy"===n.dummy&&(e.edge(n.e).labelRank=n.rank,e.removeNode(t))}))}(e)})),t(" normalize.run",(function(){o.run(e)})),t(" parentDummyChains",(function(){l(e)})),t(" addBorderSegments",(function(){d(e)})),t(" order",(function(){p(e)})),t(" insertSelfEdges",(function(){!function(e){var t=g.buildLayerMatrix(e);r.forEach(t,(function(t){var n=0;r.forEach(t,(function(t,i){var o=e.node(t);o.order=i+n,r.forEach(o.selfEdges,(function(t){g.addDummyNode(e,"selfedge",{width:t.label.width,height:t.label.height,rank:o.rank,order:i+ ++n,e:t.e,label:t.label},"_se")})),delete o.selfEdges}))}))}(e)})),t(" adjustCoordinateSystem",(function(){f.adjust(e)})),t(" position",(function(){h(e)})),t(" positionSelfEdges",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);if("selfedge"===n.dummy){var r=e.node(n.e.v),i=r.x+r.width/2,o=r.y,a=n.x-i,s=r.height/2;e.setEdge(n.e,n.label),e.removeNode(t),n.label.points=[{x:i+2*a/3,y:o-s},{x:i+5*a/6,y:o-s},{x:i+a,y:o},{x:i+5*a/6,y:o+s},{x:i+2*a/3,y:o+s}],n.label.x=n.x,n.label.y=n.y}}))}(e)})),t(" removeBorderNodes",(function(){!function(e){r.forEach(e.nodes(),(function(t){if(e.children(t).length){var n=e.node(t),i=e.node(n.borderTop),o=e.node(n.borderBottom),a=e.node(r.last(n.borderLeft)),s=e.node(r.last(n.borderRight));n.width=Math.abs(s.x-a.x),n.height=Math.abs(o.y-i.y),n.x=a.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(e.nodes(),(function(t){"border"===e.node(t).dummy&&e.removeNode(t)}))}(e)})),t(" normalize.undo",(function(){o.undo(e)})),t(" fixupEdgeLabelCoords",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(e)})),t(" undoCoordinateSystem",(function(){f.undo(e)})),t(" translateGraph",(function(){!function(e){var 
t=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,o=0,a=e.graph(),s=a.marginx||0,l=a.marginy||0;function c(e){var r=e.x,a=e.y,s=e.width,l=e.height;t=Math.min(t,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,a-l/2),o=Math.max(o,a+l/2)}r.forEach(e.nodes(),(function(t){c(e.node(t))})),r.forEach(e.edges(),(function(t){var n=e.edge(t);r.has(n,"x")&&c(n)})),t-=s,i-=l,r.forEach(e.nodes(),(function(n){var r=e.node(n);r.x-=t,r.y-=i})),r.forEach(e.edges(),(function(n){var o=e.edge(n);r.forEach(o.points,(function(e){e.x-=t,e.y-=i})),r.has(o,"x")&&(o.x-=t),r.has(o,"y")&&(o.y-=i)})),a.width=n-t+s,a.height=o-i+l}(e)})),t(" assignNodeIntersects",(function(){!function(e){r.forEach(e.edges(),(function(t){var n,r,i=e.edge(t),o=e.node(t.v),a=e.node(t.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=a,r=o),i.points.unshift(g.intersectRect(o,n)),i.points.push(g.intersectRect(a,r))}))}(e)})),t(" reversePoints",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);n.reversed&&n.points.reverse()}))}(e)})),t(" acyclic.undo",(function(){i.undo(e)}))}(t,n)})),n(" updateInputGraph",(function(){!function(e,t){r.forEach(e.nodes(),(function(n){var r=e.node(n),i=t.node(n);r&&(r.x=i.x,r.y=i.y,t.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(e.edges(),(function(n){var i=e.edge(n),o=t.edge(n);i.points=o.points,r.has(o,"x")&&(i.x=o.x,i.y=o.y)})),e.graph().width=t.graph().width,e.graph().height=t.graph().height}(e,t)}))}))};var v=["nodesep","edgesep","ranksep","marginx","marginy"],b={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},y=["acyclicer","ranker","rankdir","align"],x=["width","height"],w={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],A={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function S(e,t){return r.mapValues(r.pick(e,t),Number)}function $(e){var t={};return r.forEach(e,(function(e,n){t[n.toLowerCase()]=e})),t}},function(e,t,n){var r=n(149);e.exports=function(e){return r(e,5)}},function(e,t,n){var r=n(89),i=n(57),o=n(90),a=n(48),s=Object.prototype,l=s.hasOwnProperty,c=r((function(e,t){e=Object(e);var n=-1,r=t.length,c=r>2?t[2]:void 0;for(c&&o(t[0],t[1],c)&&(r=1);++n-1?s[l?t[c]:c]:void 0}}},function(e,t,n){var r=n(188),i=n(37),o=n(365),a=Math.max;e.exports=function(e,t,n){var s=null==e?0:e.length;if(!s)return-1;var l=null==n?0:o(n);return l<0&&(l=a(s+l,0)),r(e,i(t,3),l)}},function(e,t,n){var r=n(196);e.exports=function(e){var t=r(e),n=t%1;return t==t?n?t-n:t:0}},function(e,t,n){var r=n(367),i=n(23),o=n(61),a=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,l=/^0o[0-7]+$/i,c=parseInt;e.exports=function(e){if("number"==typeof e)return e;if(o(e))return NaN;if(i(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=i(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=r(e);var n=s.test(e);return n||l.test(e)?c(e.slice(2),n?2:8):a.test(e)?NaN:+e}},function(e,t,n){var r=n(368),i=/^\s+/;e.exports=function(e){return e?e.slice(0,r(e)+1).replace(i,""):e}},function(e,t){var n=/\s/;e.exports=function(e){for(var t=e.length;t--&&n.test(e.charAt(t)););return t}},function(e,t,n){var r=n(128),i=n(169),o=n(48);e.exports=function(e,t){return null==e?e:r(e,i(t),o)}},function(e,t){e.exports=function(e){var t=null==e?0:e.length;return t?e[t-1]:void 0}},function(e,t,n){var r=n(79),i=n(127),o=n(37);e.exports=function(e,t){var n={};return t=o(t,3),i(e,(function(e,i,o){r(n,i,t(e,i,o))})),n}},function(e,t,n){var r=n(132),i=n(373),o=n(49);e.exports=function(e){return e&&e.length?r(e,o,i):void 
0}},function(e,t){e.exports=function(e,t){return e>t}},function(e,t,n){var r=n(375),i=n(379)((function(e,t,n){r(e,t,n)}));e.exports=i},function(e,t,n){var r=n(73),i=n(198),o=n(128),a=n(376),s=n(23),l=n(48),c=n(199);e.exports=function e(t,n,u,d,f){t!==n&&o(n,(function(o,l){if(f||(f=new r),s(o))a(t,n,l,u,e,d,f);else{var p=d?d(c(t,l),o,l+"",t,n,f):void 0;void 0===p&&(p=o),i(t,l,p)}}),l)}},function(e,t,n){var r=n(198),i=n(155),o=n(164),a=n(156),s=n(165),l=n(66),c=n(13),u=n(189),d=n(59),f=n(64),p=n(23),h=n(377),g=n(67),m=n(199),v=n(378);e.exports=function(e,t,n,b,y,x,w){var k=m(e,n),A=m(t,n),E=w.get(A);if(E)r(e,n,E);else{var S=x?x(k,A,n+"",e,t,w):void 0,$=void 0===S;if($){var C=c(A),_=!C&&d(A),O=!C&&!_&&g(A);S=A,C||_||O?c(k)?S=k:u(k)?S=a(k):_?($=!1,S=i(A,!0)):O?($=!1,S=o(A,!0)):S=[]:h(A)||l(A)?(S=k,l(k)?S=v(k):p(k)&&!f(k)||(S=s(A))):$=!1}$&&(w.set(A,S),y(S,A,b,x,w),w.delete(A)),r(e,n,S)}}},function(e,t,n){var r=n(47),i=n(84),o=n(32),a=Function.prototype,s=Object.prototype,l=a.toString,c=s.hasOwnProperty,u=l.call(Object);e.exports=function(e){if(!o(e)||"[object Object]"!=r(e))return!1;var t=i(e);if(null===t)return!0;var n=c.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&l.call(n)==u}},function(e,t,n){var r=n(65),i=n(48);e.exports=function(e){return r(e,i(e))}},function(e,t,n){var r=n(89),i=n(90);e.exports=function(e){return r((function(t,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,s=o>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(o--,a):void 0,s&&i(n[0],n[1],s)&&(a=o<3?void 0:a,o=1),t=Object(t);++r1&&a(e,t[0],t[1])?t=[]:n>2&&a(t[0],t[1],t[2])&&(t=[t[0]]),i(e,r(t,1),[])}));e.exports=s},function(e,t,n){var r=n(88),i=n(86),o=n(37),a=n(184),s=n(393),l=n(82),c=n(394),u=n(49),d=n(13);e.exports=function(e,t,n){t=t.length?r(t,(function(e){return d(e)?function(t){return i(t,1===e.length?e[0]:e)}:e})):[u];var f=-1;t=r(t,l(o));var p=a(e,(function(e,n,i){return{criteria:r(t,(function(t){return t(e)})),index:++f,value:e}}));return s(p,(function(e,t){return c(e,t,n)}))}},function(e,t){e.exports=function(e,t){var n=e.length;for(e.sort(t);n--;)e[n]=e[n].value;return e}},function(e,t,n){var r=n(395);e.exports=function(e,t,n){for(var i=-1,o=e.criteria,a=t.criteria,s=o.length,l=n.length;++i=l?c:c*("desc"==n[i]?-1:1)}return e.index-t.index}},function(e,t,n){var r=n(61);e.exports=function(e,t){if(e!==t){var n=void 0!==e,i=null===e,o=e==e,a=r(e),s=void 0!==t,l=null===t,c=t==t,u=r(t);if(!l&&!u&&!a&&e>t||a&&s&&c&&!l&&!u||i&&s&&c||!n&&c||!o)return 1;if(!i&&!a&&!u&&e0;--l)if(r=t[l].dequeue()){i=i.concat(s(e,t,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(t){return e.outEdges(t.v,t.w)})),!0)};var a=r.constant(1);function s(e,t,n,i,o){var a=o?[]:void 0;return r.forEach(e.inEdges(i.v),(function(r){var i=e.edge(r),s=e.node(r.v);o&&a.push({v:r.v,w:r.w}),s.out-=i,l(t,n,s)})),r.forEach(e.outEdges(i.v),(function(r){var i=e.edge(r),o=r.w,a=e.node(o);a.in-=i,l(t,n,a)})),e.removeNode(i.v),a}function l(e,t,n){n.out?n.in?e[n.out-n.in+t].enqueue(n):e[e.length-1].enqueue(n):e[0].enqueue(n)}},function(e,t){function n(){var e={};e._next=e._prev=e,this._sentinel=e}function r(e){e._prev._next=e._next,e._next._prev=e._prev,delete e._next,delete e._prev}function i(e,t){if("_next"!==e&&"_prev"!==e)return t}e.exports=n,n.prototype.dequeue=function(){var e=this._sentinel,t=e._prev;if(t!==e)return r(t),t},n.prototype.enqueue=function(e){var 
t=this._sentinel;e._prev&&e._next&&r(e),e._next=t._next,t._next._prev=e,t._next=e,e._prev=t},n.prototype.toString=function(){for(var e=[],t=this._sentinel,n=t._prev;n!==t;)e.push(JSON.stringify(n,i)),n=n._prev;return"["+e.join(", ")+"]"}},function(e,t,n){"use strict";var r=n(11),i=n(20);e.exports={run:function(e){e.graph().dummyChains=[],r.forEach(e.edges(),(function(t){!function(e,t){var n,r,o,a=t.v,s=e.node(a).rank,l=t.w,c=e.node(l).rank,u=t.name,d=e.edge(t),f=d.labelRank;if(c===s+1)return;for(e.removeEdge(t),o=0,++s;sl.lim&&(c=l,u=!0);var d=r.filter(t.edges(),(function(t){return u===b(e,e.node(t.v),c)&&u!==b(e,e.node(t.w),c)}));return r.minBy(d,(function(e){return o(t,e)}))}function v(e,t,n,i){var o=n.v,a=n.w;e.removeEdge(o,a),e.setEdge(i.v,i.w,{}),p(e),d(e,t),function(e,t){var n=r.find(e.nodes(),(function(e){return!t.node(e).parent})),i=s(e,n);i=i.slice(1),r.forEach(i,(function(n){var r=e.node(n).parent,i=t.edge(n,r),o=!1;i||(i=t.edge(r,n),o=!0),t.node(n).rank=t.node(r).rank+(o?i.minlen:-i.minlen)}))}(e,t)}function b(e,t,n){return n.low<=t.lim&&t.lim<=n.lim}e.exports=u,u.initLowLimValues=p,u.initCutValues=d,u.calcCutValue=f,u.leaveEdge=g,u.enterEdge=m,u.exchangeEdges=v},function(e,t,n){var r=n(11);e.exports=function(e){var t=function(e){var t={},n=0;function i(o){var a=n;r.forEach(e.children(o),i),t[o]={low:a,lim:n++}}return r.forEach(e.children(),i),t}(e);r.forEach(e.graph().dummyChains,(function(n){for(var r=e.node(n),i=r.edgeObj,o=function(e,t,n,r){var i,o,a=[],s=[],l=Math.min(t[n].low,t[r].low),c=Math.max(t[n].lim,t[r].lim);i=n;do{i=e.parent(i),a.push(i)}while(i&&(t[i].low>l||c>t[i].lim));o=i,i=r;for(;(i=e.parent(i))!==o;)s.push(i);return{path:a.concat(s.reverse()),lca:o}}(e,t,i.v,i.w),a=o.path,s=o.lca,l=0,c=a[l],u=!0;n!==i.w;){if(r=e.node(n),u){for(;(c=a[l])!==s&&e.node(c).maxRank=2),s=u.buildLayerMatrix(e);var m=o(e,s);m0;)t%2&&(n+=l[t+1]),l[t=t-1>>1]+=e.weight;c+=e.weight*n}))),c}e.exports=function(e,t){for(var n=0,r=1;r=e.barycenter)&&function(e,t){var n=0,r=0;e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.vs=t.vs.concat(e.vs),e.barycenter=n/r,e.weight=r,e.i=Math.min(t.i,e.i),t.merged=!0}(e,t)}}function i(t){return function(n){n.in.push(t),0==--n.indegree&&e.push(n)}}for(;e.length;){var o=e.pop();t.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(t,(function(e){return!e.merged})),(function(e){return r.pick(e,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(e){return!e.indegree})))}},function(e,t,n){var r=n(11),i=n(20);function o(e,t,n){for(var i;t.length&&(i=r.last(t)).i<=n;)t.pop(),e.push(i.vs),n++;return n}e.exports=function(e,t){var n=i.partition(e,(function(e){return r.has(e,"barycenter")})),a=n.lhs,s=r.sortBy(n.rhs,(function(e){return-e.i})),l=[],c=0,u=0,d=0;a.sort((f=!!t,function(e,t){return e.barycentert.barycenter?1:f?t.i-e.i:e.i-t.i})),d=o(l,s,d),r.forEach(a,(function(e){d+=e.vs.length,l.push(e.vs),c+=e.barycenter*e.weight,u+=e.weight,d=o(l,s,d)}));var f;var p={vs:r.flatten(l,!0)};u&&(p.barycenter=c/u,p.weight=u);return p}},function(e,t,n){var r=n(11),i=n(28).Graph;e.exports=function(e,t,n){var o=function(e){var t;for(;e.hasNode(t=r.uniqueId("_root")););return t}(e),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(t){return e.node(t)}));return r.forEach(e.nodes(),(function(i){var s=e.node(i),l=e.parent(i);(s.rank===t||s.minRank<=t&&t<=s.maxRank)&&(a.setNode(i),a.setParent(i,l||o),r.forEach(e[n](i),(function(t){var 
n=t.v===i?t.w:t.v,o=a.edge(n,i),s=r.isUndefined(o)?0:o.weight;a.setEdge(n,i,{weight:e.edge(t).weight+s})})),r.has(s,"minRank")&&a.setNode(i,{borderLeft:s.borderLeft[t],borderRight:s.borderRight[t]}))})),a}},function(e,t,n){var r=n(11);e.exports=function(e,t,n){var i,o={};r.forEach(n,(function(n){for(var r,a,s=e.parent(n);s;){if((r=e.parent(s))?(a=o[r],o[r]=s):(a=i,i=s),a&&a!==s)return void t.setEdge(a,s);s=r}}))}},function(e,t,n){"use strict";var r=n(11),i=n(20),o=n(419).positionX;e.exports=function(e){(function(e){var t=i.buildLayerMatrix(e),n=e.graph().ranksep,o=0;r.forEach(t,(function(t){var i=r.max(r.map(t,(function(t){return e.node(t).height})));r.forEach(t,(function(t){e.node(t).y=o+i/2})),o+=i+n}))})(e=i.asNonCompoundGraph(e)),r.forEach(o(e),(function(t,n){e.node(n).x=t}))}},function(e,t,n){"use strict";var r=n(11),i=n(28).Graph,o=n(20);function a(e,t){var n={};return r.reduce(t,(function(t,i){var o=0,a=0,s=t.length,c=r.last(i);return r.forEach(i,(function(t,u){var d=function(e,t){if(e.node(t).dummy)return r.find(e.predecessors(t),(function(t){return e.node(t).dummy}))}(e,t),f=d?e.node(d).order:s;(d||t===c)&&(r.forEach(i.slice(a,u+1),(function(t){r.forEach(e.predecessors(t),(function(r){var i=e.node(r),a=i.order;!(as)&&l(n,t,c)}))}))}return r.reduce(t,(function(t,n){var o,a=-1,s=0;return r.forEach(n,(function(r,l){if("border"===e.node(r).dummy){var c=e.predecessors(r);c.length&&(o=e.node(c[0]).order,i(n,s,l,a,o),s=l,a=o)}i(n,s,n.length,o,t.length)})),n})),n}function l(e,t,n){if(t>n){var r=t;t=n,n=r}var i=e[t];i||(e[t]=i={}),i[n]=!0}function c(e,t,n){if(t>n){var i=t;t=n,n=i}return r.has(e[t],n)}function u(e,t,n,i){var o={},a={},s={};return r.forEach(t,(function(e){r.forEach(e,(function(e,t){o[e]=e,a[e]=e,s[e]=t}))})),r.forEach(t,(function(e){var t=-1;r.forEach(e,(function(e){var l=i(e);if(l.length)for(var u=((l=r.sortBy(l,(function(e){return s[e]}))).length-1)/2,d=Math.floor(u),f=Math.ceil(u);d<=f;++d){var p=l[d];a[e]===e&&t\n.menu ul ul {\n margin-left: 12px;\n}\n\n\n\n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(425),i=n(21);n(426),angular.module("dbt").directive("modelTreeLine",["$state",function(e){return{scope:{item:"=",depth:"<",resourceType:"@"},replace:!0,templateUrl:r,link:function(t,n,r,o){t.depth||(t.depth=0);var a=t.item.name;if(a){var s=i.last(a,15).join(""),l=i.initial(a,s.length).join("");t.name={name:a,start:l,end:s},t.name_start=l,t.name_end=s,t.onFolderClick=function(n){if(n.active=!n.active,"source"==t.resourceType){var r=n.name;e.go("dbt.source_list",{source:r})}else 0===t.depth&&"database"!==n.type&&e.go("dbt.project_overview",{project_name:n.name})},t.activate=function(n){t.$emit("clearSearch"),n.active=!0;var r="dbt."+n.node.resource_type;e.go(r,{unique_id:n.unique_id})},t.getIcon=function(e,t){return"#"+{header:{on:"icn-down",off:"icn-right"},database:{on:"icn-db-on",off:"icn-db"},schema:{on:"icn-tree-on",off:"icn-tree"},table:{on:"icn-doc-on",off:"icn-doc"},folder:{on:"icn-dir-on",off:"icn-dir"},file:{on:"icn-doc-on",off:"icn-doc"}}[e][t]},t.getClass=function(e){return{active:e.active,"menu-tree":"header"==e.type||"schema"==e.type||"folder"==e.type,"menu-main":"header"==e.type,"menu-node":"file"==e.type||"table"==e.type}}}}}}])},function(e,t){var n="/components/model_tree/model_tree_line.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
  • \n\n \n \n \n \n \n \n {{name.start}}\n {{name.end}}\n \n \n\n \n \n \n \n \n \n {{name.start}}\n {{name.end}}\n \n \n\n
      \n \n
    \n
  • \n')}]),e.exports=n},function(e,t,n){var r=n(427);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.unselectable{\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(429);n(31);n(206),r.module("dbt").directive("docsSearch",["$sce","project",function(e,t){return{scope:{query:"=",results:"=",onSelect:"&"},replace:!0,templateUrl:i,link:function(n){n.max_results=20,n.show_all=!1,n.max_results_columns=3,n.limit_columns={},n.checkboxStatus={show_names:!1,show_descriptions:!1,show_columns:!1,show_code:!1,show_tags:!1},n.limit_search=function(e,t,r){return t0&&null!=n.query&&n.query.trim().length>0){let t=e.replace(/\s+/g," "),o=r(i(n.query)[0]),a=t.search(new RegExp(o)),s=a-75<0?0:a-75,l=a+75>t.length?t.length:a+75;return"..."+t.substring(s,l)+"..."}return e},n.highlight=function(t){if(!n.query||!t)return e.trustAsHtml(t);let o="("+i(n.query).map(e=>r(e)).join(")|(")+")";return e.trustAsHtml(t.replace(new RegExp(o,"gi"),'$&'))},n.$watch("query",(function(e,t){0==e.length&&(n.show_all=!1,n.limit_columns={})})),n.columnFilter=function(e){var t=[];let r=i(n.query);for(var o in e)r.every(e=>-1!=o.toLowerCase().indexOf(e))&&t.push(o);return t},n.limitColumns=function(e){return void 0!==n.limit_columns[e]?n.limit_columns[e]:3}}}}])},function(e,t){var n="/components/search/search.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
[search.html template residue (tags stripped): query echo, result count, show-name/description/column/code/tag checkboxes, a resource-type badge per result, a matched-column list capped at max_results_columns = 3 with a "Show N more" expander, and a "Show N more" footer for results beyond max_results. Followed by the tableDetails directive (template /components/table_details/table_details.html): formats stats as bytes, percentages, or plain numbers; builds Owner/Type/Package/Language/Relation rows (sources render schema.identifier, other nodes schema.alias; "LATE BINDING VIEW" is displayed as "late binding view"); rewrites stat labels ("Approximate" to "~", "Utilization" to "Used"); merges source_meta with node meta; and appends caller-supplied extra fields.]
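The docsSearch highlighter escapes each query term and wraps matches case-insensitively. A sketch of that flow; the bundle's own regex-escape utility is not legible in the minified text, so a standard escape helper and a `<strong>` stand-in for the highlight span are assumed here:

// Escape regex metacharacters in a user-supplied term (assumed helper).
function escapeRegExp(s) {
  return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}

// Wrap every occurrence of any query term, mirroring the directive's
// highlight(): one alternation over all terms, flags "gi".
function highlight(text, query) {
  if (!query || !text) return text;
  const terms = query.trim().split(/\s+/).map(escapeRegExp);
  const pattern = new RegExp("(" + terms.join(")|(") + ")", "gi");
  return text.replace(pattern, (m) => `<strong>${m}</strong>`);
}

console.log(highlight("daily orders by customer", "order cust"));
// "daily <strong>order</strong>s by <strong>cust</strong>omer"

The real directive wraps matches in a highlight-classed span and pipes the result through $sce.trustAsHtml before binding.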
[table_details.html template residue: a Details header, meta key/value rows, a Tags row with an "untagged" fallback, and detail plus extended-stat rows behind a show_expanded toggle. Followed by .details-content/.detail-body CSS (fixed table layout, horizontal scroll) and the columnDetails directive (template /components/column_details/column_details.html): a Column/Type/Description/Tests/More? table whose rows expand only when a column has tests, a description, or meta; single-letter test badges U/N/F/A/+; column names cased via project.caseColumn and re-indexed in display order.]
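The tableDetails size formatter is fully legible in the minified source; restated readably (the sub-1 input rescaling is verbatim from the bundle, which receives some stats as megabyte fractions):

// Human-readable size formatter: picks a 1024-based unit and fixes decimals.
function formatBytes(bytes, decimals = 0) {
  if (bytes === 0) return "0 bytes";
  if (bytes < 1) bytes *= 1e6; // sub-1 values arrive as MB fractions
  if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return "-";
  const units = ["bytes", "KB", "MB", "GB", "TB", "PB"];
  const i = Math.floor(Math.log(bytes) / Math.log(1024));
  return (bytes / Math.pow(1024, i)).toFixed(decimals) + " " + units[i];
}

console.log(formatBytes(123456789)); // "118 MB"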
[column_details.html template residue: "Column information is not available for this seed" fallback, the column table described above, and expanded-row Details/Description/Generic Tests panels. Followed by the codeBlock directive (template /components/code_block/code_block.html): tabs over a versions map (e.g. Source/Compiled), a language_class of language-sql or language-python, Prism.highlightAll() deferred through $timeout after each selection, and copy-to-clipboard with a one-second "copied" flag.]
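The clipboard copy used by codeBlock (and by the page controllers) is legible and worth restating; it is browser-only and uses the legacy execCommand API exactly as the bundle does:

// Copy text via an off-screen <textarea>; requires a DOM.
function copyToClipboard(text) {
  const ta = document.createElement("textarea");
  ta.value = text;
  ta.setAttribute("readonly", "");
  ta.style.position = "absolute";
  ta.style.left = "-9999px";      // keep it out of view
  document.body.appendChild(ta);
  ta.select();
  document.execCommand("copy");   // deprecated, but what the bundle calls
  document.body.removeChild(ta);
}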
[code_block.html template residue (Code header, version tabs, copy button) and pre.code CSS (Monaco font, horizontal scroll), then the macroArguments directive (template /components/macro_arguments/index.html), whose link function only marks every argument as collapsed (expanded = false).]
[macro_arguments template residue: "Details are not available for this macro" fallback and an Argument/Type/Description/More? table with expandable description rows. Followed by the referenceList directive (template /components/references/index.html): groups referencing nodes by resource type, derives tab labels from the type, and builds $state hrefs from each node's resource_type and unique_id.]
[references template residue ("No resources reference this {{ node.resource_type }}" fallback, resource-type tabs, node links) and the page controllers: ModelCtrl, SourceCtrl, SeedCtrl, SnapshotCtrl, and the start of TestCtrl. Each waits on project.ready, resolves its node from $state.params.unique_id, computes references and parents via the shared utils module, and exposes a versions map for the code block: Source/Compiled for models and snapshots, generated "Sample SQL"/"Example SQL" for sources and seeds; SourceCtrl also adds Loader and Source extra table fields.]
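A condensed sketch of the controller pattern all of these share, assuming AngularJS is loaded; the real controllers additionally compute references/parents and trigger $anchorScroll, which is omitted here:

// Common shape of the dbt-docs entity controllers: resolve the node once
// the manifest is ready, then expose code versions to the codeBlock tabs.
angular.module("dbt").controller("ModelCtrl", [
  "$scope", "$state", "project",
  function ($scope, $state, project) {
    $scope.model_uid = $state.params.unique_id;
    $scope.model = {};
    $scope.versions = {};
    project.ready(function (manifest) {
      const node = manifest.nodes[$scope.model_uid];
      $scope.model = node;
      $scope.versions = {
        Source: node.raw_code,
        Compiled: node.compiled_code || "\n-- compiled code not found for this model\n",
      };
    });
  },
]);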
[remaining controllers: TestCtrl (same pattern), MacroCtrl (adapter macros expose per-adapter impls, defaulting to the project's adapter_type, then "default", then the first impl), AnalysisCtrl, ExposureCtrl (Maturity/Owner/Owner email/Exposure name extras), MetricCtrl (a Definition version built by generateMetricSQL; typed as "Expression metric" or "Aggregate metric"), OperationCtrl, GraphCtrl (builds source:/exposure:/metric: selector specs per resource type), and MainController: builds the sidebar trees, binds the "t" key to focus search, resets selection and scroll position on state transitions, scores search results by occurrence count times a field weight (name 10, tags 5, description 3, raw_code 2, columns 1), and initializes tracking from manifest metadata (send_anonymous_usage_stats, project_id). Then the dbt logo as an inline SVG data URI and the code factory: $sce-trusted Prism highlighting for sql/python, clipboard copy via the off-screen textarea, and the generateSourceSQL/generateMetricSQL helpers.]
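Both SQL generators are fully legible; restated readably (indentation of the column list is approximated):

// Build a "select <cols> from db.schema.identifier" preview for a source.
function generateSourceSQL(model) {
  const cols = Object.keys(model.columns || {});
  const lines = ["select"].concat(
    cols.map((c, i) => "    " + c + (i + 1 < cols.length ? "," : ""))
  );
  const relation =
    (model.database ? model.database + "." : "") +
    model.schema + "." + model.identifier;
  lines.push("from " + relation);
  return lines.join("\n");
}

// Render a metric: expression metrics pass through their SQL verbatim,
// aggregate metrics become select/from/where over the referenced model.
function generateMetricSQL(metric) {
  if (metric.type === "expression") return metric.sql;
  const lines = [
    `select ${metric.type}(${metric.sql})`,
    `from {{ ${metric.model} }}`,
  ];
  if (metric.filters.length > 0) {
    const where = metric.filters
      .map((f) => `${f.field} ${f.operator} ${f.value}`)
      .join(" AND ");
    lines.push("where " + where);
  }
  return lines.join("\n");
}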
[Prism grammar definitions for SQL (comment/variable/string/identifier/function/keyword/boolean/number/operator/punctuation) and Python (string interpolation with nested format-spec, triple-quoted strings, decorators, keywords, builtins), the Prism line-numbers plugin (its bit-shift and comparison operators were destroyed in extraction), the line-numbers CSS, and the GHColors Prism theme CSS. The graph factory begins at the end of this chunk.]
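How the code factory drives these grammars, as a usage sketch; this assumes the prismjs npm package, whereas the bundle ships its own copies of the grammars above:

// Highlight SQL or Python source to HTML markup, as the `code` service does.
const Prism = require("prismjs");
require("prismjs/components/prism-sql");
require("prismjs/components/prism-python");

function highlight(source, language = "sql") {
  const grammar =
    language === "python" ? Prism.languages.python : Prism.languages.sql;
  return Prism.highlight(source, grammar, language);
}

console.log(highlight("select 1 as id", "sql"));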
sans-serif',"text-valign":"center","text-halign":"center",ghost:"yes","ghost-offset-x":"2px","ghost-offset-y":"4px","ghost-opacity":.5,"text-outline-color":"#000","text-outline-width":"1px","text-outline-opacity":.2}},{selector:'node[resource_type="source"]',style:{"background-color":"#5fb825"}},{selector:'node[resource_type="exposure"]',style:{"background-color":"#ff694b"}},{selector:'node[resource_type="metric"]',style:{"background-color":"#ff5688"}},{selector:"node[node_color]",style:{"background-color":"data(node_color)"}},{selector:"node[selected=1]",style:{"background-color":"#bd6bb6"}},{selector:"node.horizontal[selected=1]",style:{"background-color":"#88447d"}},{selector:"node.horizontal.dirty",style:{"background-color":"#919599"}},{selector:"node[hidden=1]",style:{"background-color":"#919599","background-opacity":.5}}],ready:function(e){console.log("graph ready")}}};function p(e,t,n){var i=r.map(e,(function(e){return f.graph.pristine.nodes[e]})),o=[];r.flatten(r.each(e,(function(t){var n=f.graph.pristine.edges[t];r.each(n,(function(t){r.includes(e,t.data.target)&&r.includes(e,t.data.source)&&o.push(t)}))})));var s=r.compact(i).concat(r.compact(o));return r.each(f.graph.elements,(function(e){e.data.display="none",e.data.selected=0,e.data.hidden=0,e.classes=n})),r.each(s,(function(e){e.data.display="element",e.classes=n,t&&r.includes(t,e.data.unique_id)&&(e.data.selected=1),r.get(e,["data","docs","show"],!0)||(e.data.hidden=1);var i=r.get(e,["data","docs","node_color"]);i&&a.isValidColor(i)&&(e.data.node_color=i)})),f.graph.elements=r.filter(s,(function(e){return"element"==e.data.display})),e}function h(e,t,n){var r=f.graph.pristine.dag;if(r){var i=f.graph.pristine.nodes,o=s.selectNodes(r,i,e),a=n?o.matched:[];return p(o.selected,a,t)}}return f.setGraphReady=function(e){f.loading=!1,f.loaded.resolve(),f.graph_element=e},f.ready=function(e){f.loaded.promise.then((function(){e(f)}))},f.manifest={},f.packages=[],f.selected_node=null,f.getCanvasHeight=function(){return.8*t.innerHeight+"px"},l.ready((function(e){f.manifest=e,f.packages=r.uniq(r.map(f.manifest.nodes,"package_name")),r.each(r.filter(f.manifest.nodes,(function(e){var t=r.includes(["model","seed","source","snapshot","analysis","exposure","metric","operation"],e.resource_type),n="test"==e.resource_type&&!e.hasOwnProperty("test_metadata");return t||n})),(function(e){var t={group:"nodes",data:r.assign(e,{parent:e.package_name,id:e.unique_id,is_group:"false"})};f.graph.pristine.nodes[e.unique_id]=t})),r.each(f.manifest.parent_map,(function(e,t){r.each(e,(function(e){var n=f.manifest.nodes[e],i=f.manifest.nodes[t];if(r.includes(["model","source","seed","snapshot","metric"],n.resource_type)&&("test"!=i.resource_type||!i.hasOwnProperty("test_metadata"))){var o=n.unique_id+"|"+i.unique_id,a={group:"edges",data:{source:n.unique_id,target:i.unique_id,unique_id:o}},s=i.unique_id;f.graph.pristine.edges[s]||(f.graph.pristine.edges[s]=[]),f.graph.pristine.edges[s].push(a)}}))}));var t=new i.Graph({directed:!0});r.each(f.graph.pristine.nodes,(function(e){t.setNode(e.data.unique_id,e.data.name)})),r.each(f.graph.pristine.edges,(function(e){r.each(e,(function(e){t.setEdge(e.data.source,e.data.target)}))})),f.graph.pristine.dag=t,f.graph.elements=r.flatten(r.values(f.graph.pristine.nodes).concat(r.values(f.graph.pristine.edges))),p(t.nodes())})),f.hideGraph=function(){f.orientation="sidebar",f.expanded=!1},f.showVerticalGraph=function(e,t){f.orientation="sidebar",t&&(f.expanded=!0);var 
n=h(r.assign({},s.options,{include:"+"+e+"+",exclude:"",hops:1}),"vertical",!0);return f.graph.layout=d.top_down,f.graph.options=u.vertical,n},f.showFullGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=r.assign({},s.options);e?(t.include="+"+e+"+",t.exclude=""):(t.include="",t.exclude="");var n=h(t,"horizontal",!0);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(t),n},f.updateGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=h(e,"horizontal",!1);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(e),t},f.deselectNodes=function(){"fullscreen"==f.orientation&&f.graph_element.elements().data("selected",0)},f.selectNode=function(e){if("fullscreen"==f.orientation){f.graph.pristine.nodes[e];var t=f.graph.pristine.dag,n=r.indexBy(o.ancestorNodes(t,e)),i=r.indexBy(o.descendentNodes(t,e));n[e]=e,i[e]=e;var a=f.graph_element;r.each(f.graph.elements,(function(t){var r=a.$id(t.data.id);n[t.data.source]&&n[t.data.target]||i[t.data.source]&&i[t.data.target]||t.data.unique_id==e?r.data("selected",1):r.data("selected",0)}))}},f.markDirty=function(e){f.markAllClean(),r.each(e,(function(e){f.graph_element.$id(e).addClass("dirty")}))},f.markAllClean=function(){f.graph_element&&f.graph_element.elements().removeClass("dirty")},f}])},function(e,t,n){"use strict";n.r(t),n.d(t,"isValidColor",(function(){return i}));const r=new Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(31);const r=n(21),i=n(470);angular.module("dbt").factory("selectorService",["$state",function(e){var 
t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&r.includes(["analysis","test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" ":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(471);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var 
[the node-selection spec module: splitSpecs/parseSpec/parseSpecs tokenize "@" at-selectors, "N+"/"+N" parent/child hop counts, and "method:value" selectors (the regex's named capture groups were stripped in extraction); matchers by fqn, tag, source, exposure, metric, path, file, package, config, test_name, and test_type; and union/intersection application of include/exclude specs with package, tag, and resource-type filtering. Then trackingService: injects the Snowplow sp.js 2.9.0 loader, configures appId "dbt-docs" with forceSecureTracker and respectDoNotTrack, sets cookieDomain .getdbt.com when hosted there, fuzzes page and referrer URLs to https://fuzzed.getdbt.com/ for self-hosted docs, attaches an iglu:com.dbt/dbt_docs/jsonschema/1-0-0 context (is_cloud_hosted, core_project_id), and emits page views plus "dbt-docs" structured events. The chunk ends with an MD5 implementation and crypt/charenc byte helpers whose shift operators were destroyed in extraction.]
[a Runge-Kutta spring-physics function generator adapted from Framer.js (MIT), then cytoscape.js internals: style value interpolation (numbers, arrays, and colors with min/max clamping and rounding), startBatch/endBatch/batchData, renderer teardown and onRender/offRender, collection/nodes/edges/$ accessors, style application with per-element context styles and label/font style keys, mapData/data property mappings, bypass handling, style transitions (delay plus duration from transition-property), a string stylesheet parser with error recovery, the default core stylesheet, and property parsing with unit handling (px/em/ms/s/deg/rad/%). The bundle text breaks off mid-expression here.]
I=R.split(","),N=0;N0&&s>0&&!isNaN(n.w)&&!isNaN(n.h)&&n.w>0&&n.h>0)return{zoom:l=(l=(l=Math.min((a-2*t)/n.w,(s-2*t)/n.h))>this._private.maxZoom?this._private.maxZoom:l)t.maxZoom?t.maxZoom:s)t.maxZoom||!t.zoomingEnabled?a=!0:(t.zoom=l,o.push("zoom"))}if(i&&(!a||!e.cancelOnFailedZoom)&&t.panningEnabled){var c=e.pan;r.number(c.x)&&(t.pan.x=c.x,s=!1),r.number(c.y)&&(t.pan.y=c.y,s=!1),s||o.push("pan")}return o.length>0&&(o.push("viewport"),this.emit(o.join(" ")),this.notify({type:"viewport"})),this},center:function(e){var t=this.getCenterPan(e);return t&&(this._private.pan=t,this.emit("pan viewport"),this.notify({type:"viewport"})),this},getCenterPan:function(e,t){if(this._private.panningEnabled){if(r.string(e)){var n=e;e=this.mutableElements().filter(n)}else r.elementOrCollection(e)||(e=this.mutableElements());if(0!==e.length){var i=e.boundingBox(),o=this.width(),a=this.height();return{x:(o-(t=void 0===t?this._private.zoom:t)*(i.x1+i.x2))/2,y:(a-t*(i.y1+i.y2))/2}}}},reset:function(){return this._private.panningEnabled&&this._private.zoomingEnabled?(this.viewport({pan:{x:0,y:0},zoom:1}),this):this},invalidateSize:function(){this._private.sizeCache=null},size:function(){var e,t,n=this._private,r=n.container;return n.sizeCache=n.sizeCache||(r?(e=i.getComputedStyle(r),t=function(t){return parseFloat(e.getPropertyValue(t))},{width:r.clientWidth-t("padding-left")-t("padding-right"),height:r.clientHeight-t("padding-top")-t("padding-bottom")}):{width:1,height:1})},width:function(){return this.size().width},height:function(){return this.size().height},extent:function(){var e=this._private.pan,t=this._private.zoom,n=this.renderedExtent(),r={x1:(n.x1-e.x)/t,x2:(n.x2-e.x)/t,y1:(n.y1-e.y)/t,y2:(n.y2-e.y)/t};return r.w=r.x2-r.x1,r.h=r.y2-r.y1,r},renderedExtent:function(){var e=this.width(),t=this.height();return{x1:0,y1:0,x2:e,y2:t,w:e,h:t}}};a.centre=a.center,a.autolockNodes=a.autolock,a.autoungrabifyNodes=a.autoungrabify,e.exports=a},function(e,t,n){"use strict";var r=n(1),i=n(4),o=n(7),a=n(12),s=n(95),l=n(0),c=n(11),u={},d={};function f(e,t,n){var s=n,d=function(n){r.error("Can not register `"+t+"` for `"+e+"` since `"+n+"` already exists in the prototype and can not be overridden")};if("core"===e){if(a.prototype[t])return d(t);a.prototype[t]=n}else if("collection"===e){if(o.prototype[t])return d(t);o.prototype[t]=n}else if("layout"===e){for(var f=function(e){this.options=e,n.call(this,e),l.plainObject(this._private)||(this._private={}),this._private.cy=e.cy,this._private.listeners=[],this.createEmitter()},h=f.prototype=Object.create(n.prototype),g=[],m=0;m0;)m();c=n.collection();for(var v=function(e){var t=h[e],n=t.maxDegree(!1),r=t.filter((function(e){return e.degree(!1)===n}));c=c.add(r)},b=0;by.length-1;)y.push([]);y[J].push(X),Z.depth=J,Z.index=y[J].length-1}N()}var K=0;if(t.avoidOverlap)for(var ee=0;eec||0===t)&&(r+=l/u,i++)}return r/=i=Math.max(1,i),0===i&&(r=void 0),ie[e.id()]=r,r},ae=function(e,t){return oe(e)-oe(t)},se=0;se<3;se++){for(var le=0;le0&&y[0].length<=3?u/2:0),f=2*Math.PI/y[i].length*o;return 0===i&&1===y[0].length&&(d=1),{x:de+d*Math.cos(f),y:fe+d*Math.sin(f)}}return{x:de+(o+1-(a+1)/2)*s,y:(i+1)*c}}var p={x:de+(o+1-(a+1)/2)*s,y:(i+1)*c};return p},he={},ge=y.length-1;ge>=0;ge--)for(var me=y[ge],ve=0;ve1&&t.avoidOverlap){p*=1.75;var b=Math.cos(d)-Math.cos(0),y=Math.sin(d)-Math.sin(0),x=Math.sqrt(p*p/(b*b+y*y));f=Math.max(x,f)}return s.layoutPositions(this,t,(function(e,n){var 
r=t.startAngle+n*d*(a?1:-1),i=f*Math.cos(r),o=f*Math.sin(r);return{x:c+i,y:u+o}})),this},e.exports=s},function(e,t,n){"use strict";var r=n(1),i=n(2),o={fit:!0,padding:30,startAngle:1.5*Math.PI,sweep:void 0,clockwise:!0,equidistant:!1,minNodeSpacing:10,boundingBox:void 0,avoidOverlap:!0,nodeDimensionsIncludeLabels:!1,height:void 0,width:void 0,spacingFactor:void 0,concentric:function(e){return e.degree()},levelWidth:function(e){return e.maxDegree()/4},animate:!1,animationDuration:500,animationEasing:void 0,animateFilter:function(e,t){return!0},ready:void 0,stop:void 0,transform:function(e,t){return t}};function a(e){this.options=r.extend({},o,e)}a.prototype.run=function(){for(var e=this.options,t=e,n=void 0!==t.counterclockwise?!t.counterclockwise:t.clockwise,r=e.cy,o=t.eles.nodes().not(":parent"),a=i.makeBoundingBox(t.boundingBox?t.boundingBox:{x1:0,y1:0,w:r.width(),h:r.height()}),s=a.x1+a.w/2,l=a.y1+a.h/2,c=[],u=(t.startAngle,0),d=0;d0&&Math.abs(b[0].value-x.value)>=m&&(b=[],v.push(b)),b.push(x)}var w=u+t.minNodeSpacing;if(!t.avoidOverlap){var k=v.length>0&&v[0].length>1,A=(Math.min(a.w,a.h)/2-w)/(v.length+k?1:0);w=Math.min(w,A)}for(var E=0,S=0;S1&&t.avoidOverlap){var O=Math.cos(_)-Math.cos(0),j=Math.sin(_)-Math.sin(0),T=Math.sqrt(w*w/(O*O+j*j));E=Math.max(T,E)}$.r=E,E+=w}if(t.equidistant){for(var P=0,D=0,R=0;R0)var c=(f=r.nodeOverlap*s)*i/(b=Math.sqrt(i*i+o*o)),d=f*o/b;else{var f,p=u(e,i,o),h=u(t,-1*i,-1*o),g=h.x-p.x,m=h.y-p.y,v=g*g+m*m,b=Math.sqrt(v);c=(f=(e.nodeRepulsion+t.nodeRepulsion)/v)*g/b,d=f*m/b}e.isLocked||(e.offsetX-=c,e.offsetY-=d),t.isLocked||(t.offsetX+=c,t.offsetY+=d)}},l=function(e,t,n,r){if(n>0)var i=e.maxX-t.minX;else i=t.maxX-e.minX;if(r>0)var o=e.maxY-t.minY;else o=t.maxY-e.minY;return i>=0&&o>=0?Math.sqrt(i*i+o*o):0},u=function(e,t,n){var r=e.positionX,i=e.positionY,o=e.height||1,a=e.width||1,s=n/t,l=o/a,c={};return 0===t&&0n?(c.x=r,c.y=i+o/2,c):0t&&-1*l<=s&&s<=l?(c.x=r-a/2,c.y=i-a*n/2/t,c):0=l)?(c.x=r+o*t/2/n,c.y=i+o/2,c):0>n&&(s<=-1*l||s>=l)?(c.x=r-o*t/2/n,c.y=i-o/2,c):c},d=function(e,t){for(var n=0;n1){var h=t.gravity*d/p,g=t.gravity*f/p;u.offsetX+=h,u.offsetY+=g}}}}},p=function(e,t){var n=[],r=0,i=-1;for(n.push.apply(n,e.graphSet[0]),i+=e.graphSet[0].length;r<=i;){var o=n[r++],a=e.idToIndex[o],s=e.layoutNodes[a],l=s.children;if(0n)var i={x:n*e/r,y:n*t/r};else i={x:e,y:t};return i},m=function e(t,n){var r=t.parentId;if(null!=r){var i=n.layoutNodes[n.idToIndex[r]],o=!1;return(null==i.maxX||t.maxX+i.padRight>i.maxX)&&(i.maxX=t.maxX+i.padRight,o=!0),(null==i.minX||t.minX-i.padLefti.maxY)&&(i.maxY=t.maxY+i.padBottom,o=!0),(null==i.minY||t.minY-i.padTopg&&(f+=h+t.componentSpacing,d=0,p=0,h=0)}}}(0,i),r})).then((function(e){d.layoutNodes=e.layoutNodes,o.stop(),b()}));var b=function(){!0===e.animate||!1===e.animate?v({force:!0,next:function(){n.one("layoutstop",e.stop),n.emit({type:"layoutstop",layout:n})}}):e.eles.nodes().layoutPositions(n,e,(function(e){var t=d.layoutNodes[d.idToIndex[e.data("id")]];return{x:t.positionX,y:t.positionY}}))};return this},c.prototype.stop=function(){return this.stopped=!0,this.thread&&this.thread.stop(),this.emit("layoutstop"),this},c.prototype.destroy=function(){return this.thread&&this.thread.stop(),this};var u=function(e,t,n){for(var 
r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},f=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(T=0,j++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,f=w(c,"labelWidth",r)+u+2*s+2*d,p=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-f/2,x=h+f/2,k=g-p/2,A=g+p/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),j=$(x,A),T=[C.x,C.y,O.x,O.y,j.x,j.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,T))return b(n),!0}else{var P={w:f,h:p,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),f=0;fb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else f.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,j=Zt,T=Jt,P=w.length;for(p=0;p=d||w){p={cp:b,segment:x};break}}if(p)break}b=p.cp;var k=(d-g)/(x=p.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return f(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var 
S,$,C,_,O=0,j=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=f(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,f=[],p=0;pd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:p[0],y:p[1]}});var _=function(){e.data.bgActivePosistion=void 0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var 
O={originalEvent:i,type:"cxtdrag",position:{x:p[0],y:p[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:p[0],y:p[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:p[0],y:p[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var T;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;T={x:(p[0]-P[0])*c,y:(p[1]-P[1])*c},e.hoverData.justStartedPan=!1}else T={x:w[0]*c,y:w[1]*c};l.panBy(T),e.hoverData.dragged=!0}p=e.projectIntoViewport(i.clientX,i.clientY)}else if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:p[0],y:p[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:p[0],y:p[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(f(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();f(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var 
n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var T,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",T=function(n){if(j(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){f(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),p=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/p,(A[1]-c.y)/p],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var m=e.findNearestElements(i[0],i[1],!0,!0),T=m[0];if(null!=T&&(T.activate(),e.touchData.start=T,e.touchData.starts=m,e.nodeIsGrabbable(T))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),T.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(T,{addToList:P}),l(T);var 
R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};T.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):T.emit(R("grab"))}t(T,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==T&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},p=Object.keys(d),h=0;h0?f:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,f=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,f)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var 
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var 
o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var h=null,g=e.length/1,m=!s,v=0;v=g||!i.boundingBoxInBoundingBox(h.bb,b.boundingBox()))&&!(h=p({insert:!0,after:h})))return null;c||m?o.queueLayer(h,b):o.drawEleInLayer(h,b,n,t),h.eles.push(b),x[n]=h}}return c||(m?null:f)},c.getEleLevelForLayerLevel=function(e,t){return e},c.drawEleInLayer=function(e,t,n,r){var i=this.renderer,o=e.context,a=t.boundingBox();if(0!==a.w&&0!==a.h&&t.visible()){var s=this.eleTxrCache,l=s.reasons.highQuality;n=this.getEleLevelForLayerLevel(n,r);var c=s.getElement(t,a,null,n,l);c?(f(o,!1),o.drawImage(c.texture.canvas,c.x,0,c.width,c.height,a.x1,a.y1,a.w,a.h),f(o,!0)):i.drawElement(o,t)}},c.levelIsComplete=function(e,t){var n=this.layersByLevel[e];if(!n||0===n.length)return!1;for(var r=0,i=0;i0)return!1;if(o.invalid)return!1;r+=o.eles.length}return r===t.length},c.validateLayersElesOrdering=function(e,t){var n=this.layersByLevel[e];if(n)for(var r=0;r0){e=!0;break}}return e},c.invalidateElements=function(e){var 
t=this;t.lastInvalidationTime=r.performanceNow(),0!==e.length&&t.haveLayers()&&t.updateElementsInLayers(e,(function(e,n,r){t.invalidateLayer(e)}))},c.invalidateLayer=function(e){if(this.lastInvalidationTime=r.performanceNow(),!e.invalid){var t=e.level,n=e.eles,i=this.layersByLevel[t];r.removeFromArray(i,e),e.elesQueue=[],e.invalid=!0,e.replacement&&(e.replacement.invalid=!0);for(var o=0;o0&&void 0!==arguments[0]?arguments[0]:f;e.lineWidth=h,e.lineCap="butt",i.strokeStyle(e,d[0],d[1],d[2],n),i.drawEdgePath(t,e,o.allpts,p)},m=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:f;i.drawArrowheads(e,t,n)};if(e.lineJoin="round","yes"===t.pstyle("ghost").value){var v=t.pstyle("ghost-offset-x").pfValue,b=t.pstyle("ghost-offset-y").pfValue,y=t.pstyle("ghost-opacity").value,x=f*y;e.translate(v,b),g(x),m(x),e.translate(-v,-b)}g(),m(),function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c;e.lineWidth=l,"self"!==o.edgeType||a?e.lineCap="round":e.lineCap="butt",i.strokeStyle(e,u[0],u[1],u[2],n),i.drawEdgePath(t,e,o.allpts,"solid")}(),i.drawElementText(e,t,r),n&&e.translate(s.x1,s.y1)}},drawEdgePath:function(e,t,n,r){var i=e._private.rscratch,o=t,a=void 0,s=!1,l=this.usePaths();if(l){var c=n.join("$");i.pathCacheKey&&i.pathCacheKey===c?(a=t=i.pathCache,s=!0):(a=t=new Path2D,i.pathCacheKey=c,i.pathCache=a)}if(o.setLineDash)switch(r){case"dotted":o.setLineDash([1,1]);break;case"dashed":o.setLineDash([6,3]);break;case"solid":o.setLineDash([])}if(!s&&!i.badLine)switch(t.beginPath&&t.beginPath(),t.moveTo(n[0],n[1]),i.edgeType){case"bezier":case"self":case"compound":case"multibezier":if(e.hasClass("horizontal")){var u=n[4],d=n[5],f=(n[0]+n[4])/2;t.lineTo(n[0]+10,n[1]),t.bezierCurveTo(f,n[1],f,n[5],n[4]-10,n[5]),t.lineTo(u,d)}else if(e.hasClass("vertical")){var p=n[4],h=n[5],g=(n[1]+n[5])/2;t.bezierCurveTo(n[0],g,n[4],g,n[4],n[5]-10),t.lineTo(p,h)}else for(var m=2;m+30||j>0&&O>0){var P=f-T;switch(k){case"left":P-=m;break;case"center":P-=m/2}var D=p-v-T,R=m+2*T,I=v+2*T;if(_>0){var N=e.fillStyle,M=t.pstyle("text-background-color").value;e.fillStyle="rgba("+M[0]+","+M[1]+","+M[2]+","+_*o+")","roundrectangle"==t.pstyle("text-background-shape").strValue?(s=P,l=D,c=R,u=I,d=(d=2)||5,(a=e).beginPath(),a.moveTo(s+d,l),a.lineTo(s+c-d,l),a.quadraticCurveTo(s+c,l,s+c,l+d),a.lineTo(s+c,l+u-d),a.quadraticCurveTo(s+c,l+u,s+c-d,l+u),a.lineTo(s+d,l+u),a.quadraticCurveTo(s,l+u,s,l+u-d),a.lineTo(s,l+d),a.quadraticCurveTo(s,l,s+d,l),a.closePath(),a.fill()):e.fillRect(P,D,R,I),e.fillStyle=N}if(j>0&&O>0){var z=e.strokeStyle,L=e.lineWidth,B=t.pstyle("text-border-color").value,F=t.pstyle("text-border-style").value;if(e.strokeStyle="rgba("+B[0]+","+B[1]+","+B[2]+","+O*o+")",e.lineWidth=j,e.setLineDash)switch(F){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"double":e.lineWidth=j/4,e.setLineDash([]);break;case"solid":e.setLineDash([])}if(e.strokeRect(P,D,R,I),"double"===F){var q=j/2;e.strokeRect(P+q,D+q,R-2*q,I-2*q)}e.setLineDash&&e.setLineDash([]),e.lineWidth=L,e.strokeStyle=z}}var V=2*t.pstyle("text-outline-width").pfValue;if(V>0&&(e.lineWidth=V),"wrap"===t.pstyle("text-wrap").value){var U=r.getPrefixedProperty(i,"labelWrapCachedLines",n),H=v/U.length;switch(A){case"top":p-=(U.length-1)*H;break;case"center":case"bottom":p-=(U.length-1)*H}for(var G=0;G0&&e.strokeText(U[G],f,p),e.fillText(U[G],f,p),p+=H}else V>0&&e.strokeText(h,f,p),e.fillText(h,f,p);0!==E&&(e.rotate(-E),e.translate(-$,-C))}}},e.exports=o},function(e,t,n){"use strict";var 
r=n(0),i={drawNode:function(e,t,n,i){var o,a,s=this,l=t._private,c=l.rscratch,u=t.position();if(r.number(u.x)&&r.number(u.y)&&t.visible()){var d=t.effectiveOpacity(),f=s.usePaths(),p=void 0,h=!1,g=t.padding();o=t.width()+2*g,a=t.height()+2*g;var m=void 0;n&&(m=n,e.translate(-m.x1,-m.y1));for(var v=t.pstyle("background-image").value,b=new Array(v.length),y=new Array(v.length),x=0,w=0;w0&&void 0!==arguments[0]?arguments[0]:C;s.fillStyle(e,$[0],$[1],$[2],t)},P=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:j;s.strokeStyle(e,_[0],_[1],_[2],t)},D=t.pstyle("shape").strValue,R=t.pstyle("shape-polygon-points").pfValue;if(f){var I=D+"$"+o+"$"+a+("polygon"===D?"$"+R.join("$"):"");e.translate(u.x,u.y),c.pathCacheKey===I?(p=c.pathCache,h=!0):(p=new Path2D,c.pathCacheKey=I,c.pathCache=p)}var N,M,z,L=function(){if(!h){var n=u;f&&(n={x:0,y:0}),s.nodeShapes[s.getNodeShape(t)].draw(p||e,n.x,n.y,o,a)}f?e.fill(p):e.fill()},B=function(){for(var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,r=l.backgrounding,i=0,o=0;o0&&void 0!==arguments[0]&&arguments[0],r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:d;s.hasPie(t)&&(s.drawPie(e,t,r),n&&(f||s.nodeShapes[s.getNodeShape(t)].draw(e,u.x,u.y,o,a)))},q=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,n=(E>0?E:-E)*t,r=E>0?0:255;0!==E&&(s.fillStyle(e,r,r,r,n),f?e.fill(p):e.fill())},V=function(){if(S>0){if(e.lineWidth=S,e.lineCap="butt",e.setLineDash)switch(O){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"solid":case"double":e.setLineDash([])}if(f?e.stroke(p):e.stroke(),"double"===O){e.lineWidth=S/3;var t=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",f?e.stroke(p):e.stroke(),e.globalCompositeOperation=t}e.setLineDash&&e.setLineDash([])}};if("yes"===t.pstyle("ghost").value){var U=t.pstyle("ghost-offset-x").pfValue,H=t.pstyle("ghost-offset-y").pfValue,G=t.pstyle("ghost-opacity").value,W=G*d;e.translate(U,H),T(G*C),L(),B(W),F(0!==E||0!==S),q(W),P(G*j),V(),e.translate(-U,-H)}T(),L(),B(),F(0!==E||0!==S),q(),P(),V(),f&&e.translate(-u.x,-u.y),s.drawElementText(e,t,i),N=t.pstyle("overlay-padding").pfValue,M=t.pstyle("overlay-opacity").value,z=t.pstyle("overlay-color").value,M>0&&(s.fillStyle(e,z[0],z[1],z[2],M),s.nodeShapes.roundrectangle.draw(e,u.x,u.y,o+2*N,a+2*N),e.fill()),n&&e.translate(m.x1,m.y1)}},hasPie:function(e){return(e=e[0])._private.hasPie},drawPie:function(e,t,n,r){t=t[0],r=r||t.position();var i=t.cy().style(),o=t.pstyle("pie-size"),a=r.x,s=r.y,l=t.width(),c=t.height(),u=Math.min(l,c)/2,d=0;this.usePaths()&&(a=0,s=0),"%"===o.units?u*=o.pfValue:void 0!==o.pfValue&&(u=o.pfValue/2);for(var f=1;f<=i.pieBackgroundN;f++){var p=t.pstyle("pie-"+f+"-background-size").value,h=t.pstyle("pie-"+f+"-background-color").value,g=t.pstyle("pie-"+f+"-background-opacity").value*n,m=p/100;m+d>1&&(m=1-d);var v=1.5*Math.PI+2*Math.PI*d,b=v+2*Math.PI*m;0===p||d>=1||d+m>1||(e.beginPath(),e.moveTo(a,s),e.arc(a,s,u,v,b),e.closePath(),this.fillStyle(e,h[0],h[1],h[2],g),e.fill(),d+=m)}}};e.exports=i},function(e,t,n){"use strict";var r={},i=n(1);r.getPixelRatio=function(){var e=this.data.contexts[0];if(null!=this.forcedPixelRatio)return this.forcedPixelRatio;var t=e.backingStorePixelRatio||e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return(window.devicePixelRatio||1)/t},r.paintCache=function(e){for(var 
t,n=this.paintCaches=this.paintCaches||[],r=!0,i=0;is.minMbLowQualFrames&&(s.motionBlurPxRatio=s.mbPxRBlurry)),s.clearingMotionBlur&&(s.motionBlurPxRatio=1),s.textureDrawLastFrame&&!f&&(d[s.NODE]=!0,d[s.SELECT_BOX]=!0);var y=c.style()._private.coreStyle,x=c.zoom(),w=void 0!==o?o:x,k=c.pan(),A={x:k.x,y:k.y},E={zoom:x,pan:{x:k.x,y:k.y}},S=s.prevViewport;void 0===S||E.zoom!==S.zoom||E.pan.x!==S.pan.x||E.pan.y!==S.pan.y||m&&!g||(s.motionBlurPxRatio=1),a&&(A=a),w*=l,A.x*=l,A.y*=l;var $=s.getCachedZSortedEles();function C(e,t,n,r,i){var o=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",s.fillStyle(e,255,255,255,s.motionBlurTransparency),e.fillRect(t,n,r,i),e.globalCompositeOperation=o}function _(e,r){var i,l,c,d;s.clearingMotionBlur||e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]&&e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]?(i=A,l=w,c=s.canvasWidth,d=s.canvasHeight):(i={x:k.x*h,y:k.y*h},l=x*h,c=s.canvasWidth*h,d=s.canvasHeight*h),e.setTransform(1,0,0,1,0,0),"motionBlur"===r?C(e,0,0,c,d):t||void 0!==r&&!r||e.clearRect(0,0,c,d),n||(e.translate(i.x,i.y),e.scale(l,l)),a&&e.translate(a.x,a.y),o&&e.scale(o,o)}if(f||(s.textureDrawLastFrame=!1),f){if(s.textureDrawLastFrame=!0,!s.textureCache){s.textureCache={},s.textureCache.bb=c.mutableElements().boundingBox(),s.textureCache.texture=s.data.bufferCanvases[s.TEXTURE_BUFFER];var O=s.data.bufferContexts[s.TEXTURE_BUFFER];O.setTransform(1,0,0,1,0,0),O.clearRect(0,0,s.canvasWidth*s.textureMult,s.canvasHeight*s.textureMult),s.render({forcedContext:O,drawOnlyNodeLayer:!0,forcedPxRatio:l*s.textureMult}),(E=s.textureCache.viewport={zoom:c.zoom(),pan:c.pan(),width:s.canvasWidth,height:s.canvasHeight}).mpan={x:(0-E.pan.x)/E.zoom,y:(0-E.pan.y)/E.zoom}}d[s.DRAG]=!1,d[s.NODE]=!1;var j=u.contexts[s.NODE],T=s.textureCache.texture;E=s.textureCache.viewport,s.textureCache.bb,j.setTransform(1,0,0,1,0,0),p?C(j,0,0,E.width,E.height):j.clearRect(0,0,E.width,E.height);var P=y["outside-texture-bg-color"].value,D=y["outside-texture-bg-opacity"].value;s.fillStyle(j,P[0],P[1],P[2],D),j.fillRect(0,0,E.width,E.height),x=c.zoom(),_(j,!1),j.clearRect(E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l),j.drawImage(T,E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l)}else s.textureOnViewport&&!t&&(s.textureCache=null);var R=c.extent(),I=s.pinching||s.hoverData.dragging||s.swipePanning||s.data.wheelZooming||s.hoverData.draggingEles,N=s.hideEdgesOnViewport&&I,M=[];if(M[s.NODE]=!d[s.NODE]&&p&&!s.clearedForMotionBlur[s.NODE]||s.clearingMotionBlur,M[s.NODE]&&(s.clearedForMotionBlur[s.NODE]=!0),M[s.DRAG]=!d[s.DRAG]&&p&&!s.clearedForMotionBlur[s.DRAG]||s.clearingMotionBlur,M[s.DRAG]&&(s.clearedForMotionBlur[s.DRAG]=!0),d[s.NODE]||n||r||M[s.NODE]){var z=p&&!M[s.NODE]&&1!==h;_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]:u.contexts[s.NODE]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.nondrag,l,R):s.drawLayeredElements(j,$.nondrag,l,R),s.debug&&s.drawDebugPoints(j,$.nondrag),n||p||(d[s.NODE]=!1)}if(!r&&(d[s.DRAG]||n||M[s.DRAG])&&(z=p&&!M[s.DRAG]&&1!==h,_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]:u.contexts[s.DRAG]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.drag,l,R):s.drawCachedElements(j,$.drag,l,R),s.debug&&s.drawDebugPoints(j,$.drag),n||p||(d[s.DRAG]=!1)),s.showFps||!r&&d[s.SELECT_BOX]&&!n){if(_(j=t||u.contexts[s.SELECT_BOX]),1==s.selection[4]&&(s.hoverData.selecting||s.touchData.selecting)){x=s.cy.zoom();var 
L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
e;this.length=0},s=a.prototype;s.instanceString=function(){return"stylesheet"},s.selector=function(e){return this[this.length++]={selector:e,properties:[]},this},s.css=function(e,t){var n=this.length-1;if(r.string(e))this[n].properties.push({name:e,value:t});else if(r.plainObject(e))for(var a=e,s=0;s=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(239),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(35))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,i,o,a,s,l=1,c={},u=!1,d=e.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(e);f=f&&f.setTimeout?f:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((o=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){o.port2.postMessage(e)}):d&&"onreadystatechange"in d.createElement("script")?(i=d.documentElement,r=function(e){var t=d.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,i.removeChild(t),t=null},i.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",s=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",s,!1):e.attachEvent("onmessage",s),r=function(t){e.postMessage(a+t,"*")}),f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n=t||n<0||m&&e-c>=o}function w(){var e=p();if(x(e))return k(e);s=setTimeout(w,function(e){var n=t-(e-l);return m?f(n,o-(e-c)):n}(e))}function k(e){return s=void 0,v&&r?b(e):(r=i=void 0,a)}function A(){var e=p(),n=x(e);if(r=arguments,i=this,l=e,n){if(void 0===s)return y(l);if(m)return s=setTimeout(w,t),b(l)}return void 0===s&&(s=setTimeout(w,t)),a}return t=g(t)||0,h(n)&&(u=!!n.leading,o=(m="maxWait"in n)?d(g(n.maxWait)||0,t):o,v="trailing"in n?!!n.trailing:v),A.cancel=function(){void 0!==s&&clearTimeout(s),c=0,r=l=i=s=void 0},A.flush=function(){return void 0===s?a:k(p())},A}}).call(this,n(35))},function(e,t,n){e.exports=n(243)},function(e,t,n){var r,i,o;(function(){var n,a,s,l,c,u,d,f,p,h,g,m,v,b,y;s=Math.floor,h=Math.min,a=function(e,t){return et?1:0},p=function(e,t,n,r,i){var o;if(null==n&&(n=0),null==i&&(i=a),n<0)throw new Error("lo must be non-negative");for(null==r&&(r=e.length);nn;0<=n?t++:t--)c.push(t);return c}.apply(this).reverse()).length;rg;0<=g?++u:--u)m.push(c(e,n));return m},b=function(e,t,n,r){var i,o,s;for(null==r&&(r=a),i=e[n];n>t&&r(i,o=e[s=n-1>>1])<0;)e[n]=o,n=s;return e[n]=i},y=function(e,t,n){var r,i,o,s,l;for(null==n&&(n=a),i=e.length,l=t,o=e[t],r=2*t+1;r'+e.content+"":s+=">"+e.content+"";var l=t(s);return l.data("selector",e.selector),l.data("on-click-function",e.onClickFunction),l.data("show",void 0===e.show||e.show),l}function y(){var e;l("active")&&(e=s.children(),t(e).each((function(){x(t(this))})),i.off("tapstart",n),s.remove(),c(s=void 0,void 0),c("active",!1),c("anyVisibleChild",!1))}function x(e){var n="string"==typeof 
e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
    ")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new 
r,this.size=0}},function(e,t){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},function(e,t){e.exports=function(e){return this.__data__.get(e)}},function(e,t){e.exports=function(e){return this.__data__.has(e)}},function(e,t,n){var r=n(74),i=n(117),o=n(118);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new o(a)}return n.set(e,t),this.size=n.size,this}},function(e,t,n){var r=n(64),i=n(262),o=n(23),a=n(151),s=/^\[object .+?Constructor\]$/,l=Function.prototype,c=Object.prototype,u=l.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=function(e){return!(!o(e)||i(e))&&(r(e)?f:s).test(a(e))}},function(e,t,n){var r=n(58),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i}},function(e,t){var n=Object.prototype.toString;e.exports=function(e){return n.call(e)}},function(e,t,n){var r,i=n(263),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";e.exports=function(e){return!!o&&o in e}},function(e,t,n){var r=n(29)["__core-js_shared__"];e.exports=r},function(e,t){e.exports=function(e,t){return null==e?void 0:e[t]}},function(e,t,n){var r=n(266),i=n(74),o=n(117);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(e,t,n){var r=n(267),i=n(268),o=n(269),a=n(270),s=n(271);function l(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}},function(e,t,n){var r=n(173),i=n(340),o=n(344),a=n(174),s=n(345),l=n(129);e.exports=function(e,t,n){var c=-1,u=i,d=e.length,f=!0,p=[],h=p;if(n)f=!1,u=o;else if(d>=200){var g=t?null:s(e);if(g)return l(g);f=!1,u=a,h=new r}else h=t?[]:p;e:for(;++c-1}},function(e,t,n){var r=n(188),i=n(342),o=n(343);e.exports=function(e,t,n){return t==t?o(e,t,n):r(e,i,n)}},function(e,t){e.exports=function(e){return e!=e}},function(e,t){e.exports=function(e,t,n){for(var r=n-1,i=e.length;++r1||1===t.length&&e.hasEdge(t[0],t[0])}))}},function(e,t,n){var r=n(22);e.exports=function(e,t,n){return function(e,t,n){var r={},i=e.nodes();return i.forEach((function(e){r[e]={},r[e][e]={distance:0},i.forEach((function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})})),n(e).forEach((function(n){var i=n.v===e?n.w:n.v,o=t(n);r[e][i]={distance:o,predecessor:e}}))})),i.forEach((function(e){var t=r[e];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[e],i=t[n],a=o[n],s=r.distance+i.distance;s0;){if(n=l.removeMin(),r.has(s,n))a.setEdge(n,s[n]);else{if(u)throw new Error("Input graph is not connected: "+e);u=!0}e.nodeEdges(n).forEach(c)}return a}},function(e,t,n){"use strict";var r=n(11),i=n(399),o=n(402),a=n(403),s=n(20).normalizeRanks,l=n(405),c=n(20).removeEmptyRanks,u=n(406),d=n(407),f=n(408),p=n(409),h=n(418),g=n(20),m=n(28).Graph;e.exports=function(e,t){var n=t&&t.debugTiming?g.time:g.notime;n("layout",(function(){var t=n(" buildLayoutGraph",(function(){return function(e){var t=new m({multigraph:!0,compound:!0}),n=$(e.graph());return t.setGraph(r.merge({},b,S(n,v),r.pick(n,y))),r.forEach(e.nodes(),(function(n){var 
i=$(e.node(n));t.setNode(n,r.defaults(S(i,x),w)),t.setParent(n,e.parent(n))})),r.forEach(e.edges(),(function(n){var i=$(e.edge(n));t.setEdge(n,r.merge({},A,S(i,k),r.pick(i,E)))})),t}(e)}));n(" runLayout",(function(){!function(e,t){t(" makeSpaceForEdgeLabels",(function(){!function(e){var t=e.graph();t.ranksep/=2,r.forEach(e.edges(),(function(n){var r=e.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===t.rankdir||"BT"===t.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(e)})),t(" removeSelfEdges",(function(){!function(e){r.forEach(e.edges(),(function(t){if(t.v===t.w){var n=e.node(t.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:t,label:e.edge(t)}),e.removeEdge(t)}}))}(e)})),t(" acyclic",(function(){i.run(e)})),t(" nestingGraph.run",(function(){u.run(e)})),t(" rank",(function(){a(g.asNonCompoundGraph(e))})),t(" injectEdgeLabelProxies",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(n.width&&n.height){var r=e.node(t.v),i={rank:(e.node(t.w).rank-r.rank)/2+r.rank,e:t};g.addDummyNode(e,"edge-proxy",i,"_ep")}}))}(e)})),t(" removeEmptyRanks",(function(){c(e)})),t(" nestingGraph.cleanup",(function(){u.cleanup(e)})),t(" normalizeRanks",(function(){s(e)})),t(" assignRankMinMax",(function(){!function(e){var t=0;r.forEach(e.nodes(),(function(n){var i=e.node(n);i.borderTop&&(i.minRank=e.node(i.borderTop).rank,i.maxRank=e.node(i.borderBottom).rank,t=r.max(t,i.maxRank))})),e.graph().maxRank=t}(e)})),t(" removeEdgeLabelProxies",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);"edge-proxy"===n.dummy&&(e.edge(n.e).labelRank=n.rank,e.removeNode(t))}))}(e)})),t(" normalize.run",(function(){o.run(e)})),t(" parentDummyChains",(function(){l(e)})),t(" addBorderSegments",(function(){d(e)})),t(" order",(function(){p(e)})),t(" insertSelfEdges",(function(){!function(e){var t=g.buildLayerMatrix(e);r.forEach(t,(function(t){var n=0;r.forEach(t,(function(t,i){var o=e.node(t);o.order=i+n,r.forEach(o.selfEdges,(function(t){g.addDummyNode(e,"selfedge",{width:t.label.width,height:t.label.height,rank:o.rank,order:i+ ++n,e:t.e,label:t.label},"_se")})),delete o.selfEdges}))}))}(e)})),t(" adjustCoordinateSystem",(function(){f.adjust(e)})),t(" position",(function(){h(e)})),t(" positionSelfEdges",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);if("selfedge"===n.dummy){var r=e.node(n.e.v),i=r.x+r.width/2,o=r.y,a=n.x-i,s=r.height/2;e.setEdge(n.e,n.label),e.removeNode(t),n.label.points=[{x:i+2*a/3,y:o-s},{x:i+5*a/6,y:o-s},{x:i+a,y:o},{x:i+5*a/6,y:o+s},{x:i+2*a/3,y:o+s}],n.label.x=n.x,n.label.y=n.y}}))}(e)})),t(" removeBorderNodes",(function(){!function(e){r.forEach(e.nodes(),(function(t){if(e.children(t).length){var n=e.node(t),i=e.node(n.borderTop),o=e.node(n.borderBottom),a=e.node(r.last(n.borderLeft)),s=e.node(r.last(n.borderRight));n.width=Math.abs(s.x-a.x),n.height=Math.abs(o.y-i.y),n.x=a.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(e.nodes(),(function(t){"border"===e.node(t).dummy&&e.removeNode(t)}))}(e)})),t(" normalize.undo",(function(){o.undo(e)})),t(" fixupEdgeLabelCoords",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(e)})),t(" undoCoordinateSystem",(function(){f.undo(e)})),t(" translateGraph",(function(){!function(e){var 
t=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,o=0,a=e.graph(),s=a.marginx||0,l=a.marginy||0;function c(e){var r=e.x,a=e.y,s=e.width,l=e.height;t=Math.min(t,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,a-l/2),o=Math.max(o,a+l/2)}r.forEach(e.nodes(),(function(t){c(e.node(t))})),r.forEach(e.edges(),(function(t){var n=e.edge(t);r.has(n,"x")&&c(n)})),t-=s,i-=l,r.forEach(e.nodes(),(function(n){var r=e.node(n);r.x-=t,r.y-=i})),r.forEach(e.edges(),(function(n){var o=e.edge(n);r.forEach(o.points,(function(e){e.x-=t,e.y-=i})),r.has(o,"x")&&(o.x-=t),r.has(o,"y")&&(o.y-=i)})),a.width=n-t+s,a.height=o-i+l}(e)})),t(" assignNodeIntersects",(function(){!function(e){r.forEach(e.edges(),(function(t){var n,r,i=e.edge(t),o=e.node(t.v),a=e.node(t.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=a,r=o),i.points.unshift(g.intersectRect(o,n)),i.points.push(g.intersectRect(a,r))}))}(e)})),t(" reversePoints",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);n.reversed&&n.points.reverse()}))}(e)})),t(" acyclic.undo",(function(){i.undo(e)}))}(t,n)})),n(" updateInputGraph",(function(){!function(e,t){r.forEach(e.nodes(),(function(n){var r=e.node(n),i=t.node(n);r&&(r.x=i.x,r.y=i.y,t.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(e.edges(),(function(n){var i=e.edge(n),o=t.edge(n);i.points=o.points,r.has(o,"x")&&(i.x=o.x,i.y=o.y)})),e.graph().width=t.graph().width,e.graph().height=t.graph().height}(e,t)}))}))};var v=["nodesep","edgesep","ranksep","marginx","marginy"],b={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},y=["acyclicer","ranker","rankdir","align"],x=["width","height"],w={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],A={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function S(e,t){return r.mapValues(r.pick(e,t),Number)}function $(e){var t={};return r.forEach(e,(function(e,n){t[n.toLowerCase()]=e})),t}},function(e,t,n){var r=n(149);e.exports=function(e){return r(e,5)}},function(e,t,n){var r=n(89),i=n(57),o=n(90),a=n(48),s=Object.prototype,l=s.hasOwnProperty,c=r((function(e,t){e=Object(e);var n=-1,r=t.length,c=r>2?t[2]:void 0;for(c&&o(t[0],t[1],c)&&(r=1);++n-1?s[l?t[c]:c]:void 0}}},function(e,t,n){var r=n(188),i=n(37),o=n(365),a=Math.max;e.exports=function(e,t,n){var s=null==e?0:e.length;if(!s)return-1;var l=null==n?0:o(n);return l<0&&(l=a(s+l,0)),r(e,i(t,3),l)}},function(e,t,n){var r=n(196);e.exports=function(e){var t=r(e),n=t%1;return t==t?n?t-n:t:0}},function(e,t,n){var r=n(367),i=n(23),o=n(61),a=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,l=/^0o[0-7]+$/i,c=parseInt;e.exports=function(e){if("number"==typeof e)return e;if(o(e))return NaN;if(i(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=i(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=r(e);var n=s.test(e);return n||l.test(e)?c(e.slice(2),n?2:8):a.test(e)?NaN:+e}},function(e,t,n){var r=n(368),i=/^\s+/;e.exports=function(e){return e?e.slice(0,r(e)+1).replace(i,""):e}},function(e,t){var n=/\s/;e.exports=function(e){for(var t=e.length;t--&&n.test(e.charAt(t)););return t}},function(e,t,n){var r=n(128),i=n(169),o=n(48);e.exports=function(e,t){return null==e?e:r(e,i(t),o)}},function(e,t){e.exports=function(e){var t=null==e?0:e.length;return t?e[t-1]:void 0}},function(e,t,n){var r=n(79),i=n(127),o=n(37);e.exports=function(e,t){var n={};return t=o(t,3),i(e,(function(e,i,o){r(n,i,t(e,i,o))})),n}},function(e,t,n){var r=n(132),i=n(373),o=n(49);e.exports=function(e){return e&&e.length?r(e,o,i):void 
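The layout entry point a few lines above runs dagre's pipeline as named, timed phases (makeSpaceForEdgeLabels, acyclic, rank, order, position, translateGraph, and so on). Driving the same pipeline directly looks roughly like this, assuming the standalone dagre package rather than the bundled copy; node names are illustrative:

// Sketch: build a graphlib graph, run dagre.layout, read back coordinates.
const dagre = require('dagre');

const g = new dagre.graphlib.Graph({ multigraph: true, compound: true });
g.setGraph({ rankdir: 'TB', nodesep: 50, ranksep: 50, edgesep: 20 });
g.setDefaultEdgeLabel(() => ({}));

g.setNode('model.a', { width: 120, height: 30 });
g.setNode('model.b', { width: 120, height: 30 });
g.setEdge('model.a', 'model.b');

// Internally runs the phases named above: acyclic, rank, order,
// position, translateGraph, and the rest.
dagre.layout(g);

g.nodes().forEach((n) => {
  const { x, y } = g.node(n);
  console.log(n, x, y);
});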
0}},function(e,t){e.exports=function(e,t){return e>t}},function(e,t,n){var r=n(375),i=n(379)((function(e,t,n){r(e,t,n)}));e.exports=i},function(e,t,n){var r=n(73),i=n(198),o=n(128),a=n(376),s=n(23),l=n(48),c=n(199);e.exports=function e(t,n,u,d,f){t!==n&&o(n,(function(o,l){if(f||(f=new r),s(o))a(t,n,l,u,e,d,f);else{var p=d?d(c(t,l),o,l+"",t,n,f):void 0;void 0===p&&(p=o),i(t,l,p)}}),l)}},function(e,t,n){var r=n(198),i=n(155),o=n(164),a=n(156),s=n(165),l=n(66),c=n(13),u=n(189),d=n(59),f=n(64),p=n(23),h=n(377),g=n(67),m=n(199),v=n(378);e.exports=function(e,t,n,b,y,x,w){var k=m(e,n),A=m(t,n),E=w.get(A);if(E)r(e,n,E);else{var S=x?x(k,A,n+"",e,t,w):void 0,$=void 0===S;if($){var C=c(A),_=!C&&d(A),O=!C&&!_&&g(A);S=A,C||_||O?c(k)?S=k:u(k)?S=a(k):_?($=!1,S=i(A,!0)):O?($=!1,S=o(A,!0)):S=[]:h(A)||l(A)?(S=k,l(k)?S=v(k):p(k)&&!f(k)||(S=s(A))):$=!1}$&&(w.set(A,S),y(S,A,b,x,w),w.delete(A)),r(e,n,S)}}},function(e,t,n){var r=n(47),i=n(84),o=n(32),a=Function.prototype,s=Object.prototype,l=a.toString,c=s.hasOwnProperty,u=l.call(Object);e.exports=function(e){if(!o(e)||"[object Object]"!=r(e))return!1;var t=i(e);if(null===t)return!0;var n=c.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&l.call(n)==u}},function(e,t,n){var r=n(65),i=n(48);e.exports=function(e){return r(e,i(e))}},function(e,t,n){var r=n(89),i=n(90);e.exports=function(e){return r((function(t,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,s=o>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(o--,a):void 0,s&&i(n[0],n[1],s)&&(a=o<3?void 0:a,o=1),t=Object(t);++r1&&a(e,t[0],t[1])?t=[]:n>2&&a(t[0],t[1],t[2])&&(t=[t[0]]),i(e,r(t,1),[])}));e.exports=s},function(e,t,n){var r=n(88),i=n(86),o=n(37),a=n(184),s=n(393),l=n(82),c=n(394),u=n(49),d=n(13);e.exports=function(e,t,n){t=t.length?r(t,(function(e){return d(e)?function(t){return i(t,1===e.length?e[0]:e)}:e})):[u];var f=-1;t=r(t,l(o));var p=a(e,(function(e,n,i){return{criteria:r(t,(function(t){return t(e)})),index:++f,value:e}}));return s(p,(function(e,t){return c(e,t,n)}))}},function(e,t){e.exports=function(e,t){var n=e.length;for(e.sort(t);n--;)e[n]=e[n].value;return e}},function(e,t,n){var r=n(395);e.exports=function(e,t,n){for(var i=-1,o=e.criteria,a=t.criteria,s=o.length,l=n.length;++i=l?c:c*("desc"==n[i]?-1:1)}return e.index-t.index}},function(e,t,n){var r=n(61);e.exports=function(e,t){if(e!==t){var n=void 0!==e,i=null===e,o=e==e,a=r(e),s=void 0!==t,l=null===t,c=t==t,u=r(t);if(!l&&!u&&!a&&e>t||a&&s&&c&&!l&&!u||i&&s&&c||!n&&c||!o)return 1;if(!i&&!a&&!u&&e0;--l)if(r=t[l].dequeue()){i=i.concat(s(e,t,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(t){return e.outEdges(t.v,t.w)})),!0)};var a=r.constant(1);function s(e,t,n,i,o){var a=o?[]:void 0;return r.forEach(e.inEdges(i.v),(function(r){var i=e.edge(r),s=e.node(r.v);o&&a.push({v:r.v,w:r.w}),s.out-=i,l(t,n,s)})),r.forEach(e.outEdges(i.v),(function(r){var i=e.edge(r),o=r.w,a=e.node(o);a.in-=i,l(t,n,a)})),e.removeNode(i.v),a}function l(e,t,n){n.out?n.in?e[n.out-n.in+t].enqueue(n):e[e.length-1].enqueue(n):e[0].enqueue(n)}},function(e,t){function n(){var e={};e._next=e._prev=e,this._sentinel=e}function r(e){e._prev._next=e._next,e._next._prev=e._prev,delete e._next,delete e._prev}function i(e,t){if("_next"!==e&&"_prev"!==e)return t}e.exports=n,n.prototype.dequeue=function(){var e=this._sentinel,t=e._prev;if(t!==e)return r(t),t},n.prototype.enqueue=function(e){var 
t=this._sentinel;e._prev&&e._next&&r(e),e._next=t._next,t._next._prev=e,t._next=e,e._prev=t},n.prototype.toString=function(){for(var e=[],t=this._sentinel,n=t._prev;n!==t;)e.push(JSON.stringify(n,i)),n=n._prev;return"["+e.join(", ")+"]"}},function(e,t,n){"use strict";var r=n(11),i=n(20);e.exports={run:function(e){e.graph().dummyChains=[],r.forEach(e.edges(),(function(t){!function(e,t){var n,r,o,a=t.v,s=e.node(a).rank,l=t.w,c=e.node(l).rank,u=t.name,d=e.edge(t),f=d.labelRank;if(c===s+1)return;for(e.removeEdge(t),o=0,++s;sl.lim&&(c=l,u=!0);var d=r.filter(t.edges(),(function(t){return u===b(e,e.node(t.v),c)&&u!==b(e,e.node(t.w),c)}));return r.minBy(d,(function(e){return o(t,e)}))}function v(e,t,n,i){var o=n.v,a=n.w;e.removeEdge(o,a),e.setEdge(i.v,i.w,{}),p(e),d(e,t),function(e,t){var n=r.find(e.nodes(),(function(e){return!t.node(e).parent})),i=s(e,n);i=i.slice(1),r.forEach(i,(function(n){var r=e.node(n).parent,i=t.edge(n,r),o=!1;i||(i=t.edge(r,n),o=!0),t.node(n).rank=t.node(r).rank+(o?i.minlen:-i.minlen)}))}(e,t)}function b(e,t,n){return n.low<=t.lim&&t.lim<=n.lim}e.exports=u,u.initLowLimValues=p,u.initCutValues=d,u.calcCutValue=f,u.leaveEdge=g,u.enterEdge=m,u.exchangeEdges=v},function(e,t,n){var r=n(11);e.exports=function(e){var t=function(e){var t={},n=0;function i(o){var a=n;r.forEach(e.children(o),i),t[o]={low:a,lim:n++}}return r.forEach(e.children(),i),t}(e);r.forEach(e.graph().dummyChains,(function(n){for(var r=e.node(n),i=r.edgeObj,o=function(e,t,n,r){var i,o,a=[],s=[],l=Math.min(t[n].low,t[r].low),c=Math.max(t[n].lim,t[r].lim);i=n;do{i=e.parent(i),a.push(i)}while(i&&(t[i].low>l||c>t[i].lim));o=i,i=r;for(;(i=e.parent(i))!==o;)s.push(i);return{path:a.concat(s.reverse()),lca:o}}(e,t,i.v,i.w),a=o.path,s=o.lca,l=0,c=a[l],u=!0;n!==i.w;){if(r=e.node(n),u){for(;(c=a[l])!==s&&e.node(c).maxRank=2),s=u.buildLayerMatrix(e);var m=o(e,s);m0;)t%2&&(n+=l[t+1]),l[t=t-1>>1]+=e.weight;c+=e.weight*n}))),c}e.exports=function(e,t){for(var n=0,r=1;r=e.barycenter)&&function(e,t){var n=0,r=0;e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.vs=t.vs.concat(e.vs),e.barycenter=n/r,e.weight=r,e.i=Math.min(t.i,e.i),t.merged=!0}(e,t)}}function i(t){return function(n){n.in.push(t),0==--n.indegree&&e.push(n)}}for(;e.length;){var o=e.pop();t.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(t,(function(e){return!e.merged})),(function(e){return r.pick(e,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(e){return!e.indegree})))}},function(e,t,n){var r=n(11),i=n(20);function o(e,t,n){for(var i;t.length&&(i=r.last(t)).i<=n;)t.pop(),e.push(i.vs),n++;return n}e.exports=function(e,t){var n=i.partition(e,(function(e){return r.has(e,"barycenter")})),a=n.lhs,s=r.sortBy(n.rhs,(function(e){return-e.i})),l=[],c=0,u=0,d=0;a.sort((f=!!t,function(e,t){return e.barycentert.barycenter?1:f?t.i-e.i:e.i-t.i})),d=o(l,s,d),r.forEach(a,(function(e){d+=e.vs.length,l.push(e.vs),c+=e.barycenter*e.weight,u+=e.weight,d=o(l,s,d)}));var f;var p={vs:r.flatten(l,!0)};u&&(p.barycenter=c/u,p.weight=u);return p}},function(e,t,n){var r=n(11),i=n(28).Graph;e.exports=function(e,t,n){var o=function(e){var t;for(;e.hasNode(t=r.uniqueId("_root")););return t}(e),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(t){return e.node(t)}));return r.forEach(e.nodes(),(function(i){var s=e.node(i),l=e.parent(i);(s.rank===t||s.minRank<=t&&t<=s.maxRank)&&(a.setNode(i),a.setParent(i,l||o),r.forEach(e[n](i),(function(t){var 
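The small module above is the sentinel-based doubly linked list backing the greedy feedback-arc-set buckets: enqueue splices an entry in at the head (re-linking it if it was already queued) and dequeue removes from the tail, so the oldest entry comes out first. An unminified sketch of the same structure:

// Sketch of the bundle's List: a circular list with a sentinel node.
function List() {
  const sentinel = {};
  sentinel._next = sentinel._prev = sentinel;
  this._sentinel = sentinel;
}

function unlink(entry) {
  entry._prev._next = entry._next;
  entry._next._prev = entry._prev;
  delete entry._next;
  delete entry._prev;
}

List.prototype.enqueue = function (entry) {
  const s = this._sentinel;
  if (entry._prev && entry._next) unlink(entry); // re-enqueue moves the entry
  entry._next = s._next;
  s._next._prev = entry;
  s._next = entry;
  entry._prev = s;
};

List.prototype.dequeue = function () {
  const s = this._sentinel;
  const entry = s._prev;
  if (entry !== s) {
    unlink(entry);
    return entry;
  }
};

const q = new List();
q.enqueue({ v: 'a' });
q.enqueue({ v: 'b' });
console.log(q.dequeue().v); // 'a' — the oldest entry leaves first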
n=t.v===i?t.w:t.v,o=a.edge(n,i),s=r.isUndefined(o)?0:o.weight;a.setEdge(n,i,{weight:e.edge(t).weight+s})})),r.has(s,"minRank")&&a.setNode(i,{borderLeft:s.borderLeft[t],borderRight:s.borderRight[t]}))})),a}},function(e,t,n){var r=n(11);e.exports=function(e,t,n){var i,o={};r.forEach(n,(function(n){for(var r,a,s=e.parent(n);s;){if((r=e.parent(s))?(a=o[r],o[r]=s):(a=i,i=s),a&&a!==s)return void t.setEdge(a,s);s=r}}))}},function(e,t,n){"use strict";var r=n(11),i=n(20),o=n(419).positionX;e.exports=function(e){(function(e){var t=i.buildLayerMatrix(e),n=e.graph().ranksep,o=0;r.forEach(t,(function(t){var i=r.max(r.map(t,(function(t){return e.node(t).height})));r.forEach(t,(function(t){e.node(t).y=o+i/2})),o+=i+n}))})(e=i.asNonCompoundGraph(e)),r.forEach(o(e),(function(t,n){e.node(n).x=t}))}},function(e,t,n){"use strict";var r=n(11),i=n(28).Graph,o=n(20);function a(e,t){var n={};return r.reduce(t,(function(t,i){var o=0,a=0,s=t.length,c=r.last(i);return r.forEach(i,(function(t,u){var d=function(e,t){if(e.node(t).dummy)return r.find(e.predecessors(t),(function(t){return e.node(t).dummy}))}(e,t),f=d?e.node(d).order:s;(d||t===c)&&(r.forEach(i.slice(a,u+1),(function(t){r.forEach(e.predecessors(t),(function(r){var i=e.node(r),a=i.order;!(as)&&l(n,t,c)}))}))}return r.reduce(t,(function(t,n){var o,a=-1,s=0;return r.forEach(n,(function(r,l){if("border"===e.node(r).dummy){var c=e.predecessors(r);c.length&&(o=e.node(c[0]).order,i(n,s,l,a,o),s=l,a=o)}i(n,s,n.length,o,t.length)})),n})),n}function l(e,t,n){if(t>n){var r=t;t=n,n=r}var i=e[t];i||(e[t]=i={}),i[n]=!0}function c(e,t,n){if(t>n){var i=t;t=n,n=i}return r.has(e[t],n)}function u(e,t,n,i){var o={},a={},s={};return r.forEach(t,(function(e){r.forEach(e,(function(e,t){o[e]=e,a[e]=e,s[e]=t}))})),r.forEach(t,(function(e){var t=-1;r.forEach(e,(function(e){var l=i(e);if(l.length)for(var u=((l=r.sortBy(l,(function(e){return s[e]}))).length-1)/2,d=Math.floor(u),f=Math.ceil(u);d<=f;++d){var p=l[d];a[e]===e&&t\n.menu ul ul {\n margin-left: 12px;\n}\n\n\n\n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(425),i=n(21);n(426),angular.module("dbt").directive("modelTreeLine",["$state",function(e){return{scope:{item:"=",depth:"<",resourceType:"@"},replace:!0,templateUrl:r,link:function(t,n,r,o){t.depth||(t.depth=0);var a=t.item.name;if(a){var s=i.last(a,15).join(""),l=i.initial(a,s.length).join("");t.name={name:a,start:l,end:s},t.name_start=l,t.name_end=s,t.onFolderClick=function(n){if(n.active=!n.active,"source"==t.resourceType){var r=n.name;e.go("dbt.source_list",{source:r})}else 0===t.depth&&"database"!==n.type&&e.go("dbt.project_overview",{project_name:n.name})},t.activate=function(n){t.$emit("clearSearch"),n.active=!0;var r="dbt."+n.node.resource_type;e.go(r,{unique_id:n.unique_id})},t.getIcon=function(e,t){return"#"+{header:{on:"icn-down",off:"icn-right"},database:{on:"icn-db-on",off:"icn-db"},schema:{on:"icn-tree-on",off:"icn-tree"},table:{on:"icn-doc-on",off:"icn-doc"},folder:{on:"icn-dir-on",off:"icn-dir"},file:{on:"icn-doc-on",off:"icn-doc"}}[e][t]},t.getClass=function(e){return{active:e.active,"menu-tree":"header"==e.type||"schema"==e.type||"folder"==e.type,"menu-main":"header"==e.type,"menu-node":"file"==e.type||"table"==e.type}}}}}}])},function(e,t){var n="/components/model_tree/model_tree_line.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
[model_tree_line.html markup stripped in extraction; recoverable bindings: the folder/file icons and the {{name.start}} / {{name.end}} name segments for the two tree-row variants]
  • \n')}]),e.exports=n},function(e,t,n){var r=n(427);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.unselectable{\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(429);n(31);n(206),r.module("dbt").directive("docsSearch",["$sce","project",function(e,t){return{scope:{query:"=",results:"=",onSelect:"&"},replace:!0,templateUrl:i,link:function(n){n.max_results=20,n.show_all=!1,n.max_results_columns=3,n.limit_columns={},n.checkboxStatus={show_names:!1,show_descriptions:!1,show_columns:!1,show_code:!1,show_tags:!1},n.limit_search=function(e,t,r){return t0&&null!=n.query&&n.query.trim().length>0){let t=e.replace(/\s+/g," "),o=r(i(n.query)[0]),a=t.search(new RegExp(o)),s=a-75<0?0:a-75,l=a+75>t.length?t.length:a+75;return"..."+t.substring(s,l)+"..."}return e},n.highlight=function(t){if(!n.query||!t)return e.trustAsHtml(t);let o="("+i(n.query).map(e=>r(e)).join(")|(")+")";return e.trustAsHtml(t.replace(new RegExp(o,"gi"),'$&'))},n.$watch("query",(function(e,t){0==e.length&&(n.show_all=!1,n.limit_columns={})})),n.columnFilter=function(e){var t=[];let r=i(n.query);for(var o in e)r.every(e=>-1!=o.toLowerCase().indexOf(e))&&t.push(o);return t},n.limitColumns=function(e){return void 0!==n.limit_columns[e]?n.limit_columns[e]:3}}}}])},function(e,t){var n="/components/search/search.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
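The docsSearch directive above builds its 75-character match snippets and highlights by regex-escaping each query term and wrapping case-insensitive matches in the rendered text. A simplified sketch of that technique (function names here are illustrative, not the bundle's):

// Sketch: escape the query for RegExp use, then wrap every match.
function escapeRegExp(s) {
  return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

function highlight(text, query) {
  if (!query || !text) return text;
  const pattern = new RegExp('(' + escapeRegExp(query) + ')', 'gi');
  return text.replace(pattern, '<strong>$&</strong>');
}

console.log(highlight('select * from raw_orders', 'raw'));
// -> "select * from <strong>raw</strong>_orders"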
[search.html markup stripped in extraction; recoverable labels/bindings: {{ query }}, "{{ results.length }} search results", the show_names/show_descriptions/show_columns/show_code/show_tags filter checkboxes, {{result.model.resource_type}}, a columns list with "Show {{ columnFilter(result.model.columns).length - max_results_columns }} more", a tags list, and "Show {{ results.length - max_results }} more"]
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(431);n(432);const i=n(21);angular.module("dbt").directive("tableDetails",["$sce","$filter",function(e,t){return{scope:{model:"=",extras:"=",exclude:"<"},templateUrl:r,link:function(e){function n(e,t){if(0==e)return"0 bytes";if(e<1&&(e*=1e6),isNaN(parseFloat(e))||!isFinite(e))return"-";void 0===t&&(t=0);var n=Math.floor(Math.log(e)/Math.log(1024));return(e/Math.pow(1024,Math.floor(n))).toFixed(t)+" "+["bytes","KB","MB","GB","TB","PB"][n]}function r(e,n){return void 0===n&&(n=2),t("number")(100*e,n)+"%"}function o(e,n){return void 0===n&&(n=0),t("number")(e,n)}e.details=[],e.extended=[],e.exclude=e.exclude||[],e.meta=null,e._show_expanded=!1,e.show_expanded=function(t){return void 0!==t&&(e._show_expanded=t),e._show_expanded},e.hasData=function(e){return!(!e||i.isEmpty(e))&&(1!=e.length||0!=e[0].include)},e.$watch("model",(function(t,a){i.property(["metadata","type"])(t);var s,l,c,u=t.hasOwnProperty("sources")&&null!=t.sources[0]?t.sources[0].source_meta:null;if(e.meta=t.meta||u,e.details=function(e){var t,n,r=!e.metadata,o=e.metadata||{};t=e.database?e.database+".":"",n=r?void 0:"source"==e.resource_type?t+e.schema+"."+e.identifier:t+e.schema+"."+e.alias;var a,s=[{name:"Owner",value:o.owner},{name:"Type",value:r?void 0:(a=o.type,"BASE TABLE"==a?{type:"table",name:"table"}:"LATE BINDING VIEW"==a?{type:"view",name:"late binding view"}:{type:a.toLowerCase(),name:a.toLowerCase()}).name},{name:"Package",value:e.package_name},{name:"Language",value:e.language},{name:"Relation",value:n}];return i.filter(s,(function(e){return void 0!==e.value}))}(t),e.extended=(s=t.stats,l={rows:o,row_count:o,num_rows:o,max_varchar:o,pct_used:r,size:n,bytes:n,num_bytes:n},c=i.sortBy(i.values(s),"label"),i.map(c,(function(e){var t=i.clone(e),n=l[e.id];return n&&(t.value=n(e.value),t.label=e.label.replace("Approximate","~"),t.label=e.label.replace("Utilization","Used")),t}))),e.extras){var d=i.filter(e.extras,(function(e){return void 0!==e.value&&null!==e.value}));e.details=e.details.concat(d)}e.show_extended=i.where(e.extended,{include:!0}).length>0})),e.queryTag=function(t){e.$emit("query",t)}}}}])},function(e,t){var n="/components/table_details/table_details.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
[table_details.html markup stripped in extraction; recoverable labels/bindings: a "Details" header, meta key/value rows ({{ k }} / {{ v }}), a "Tags" block ({{ tag }}, falling back to "untagged"), detail rows ({{ item.name }} / {{ item.value }}), and extended-stats rows ({{ item.label }} / {{ item.value }})]
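The tableDetails directive above formats table stats with a bytes humanizer: values below 1 are treated as megabyte fractions, then a binary unit is chosen by log base 1024. A readable sketch of the same formatter:

// Sketch of the stats formatter in tableDetails above.
function formatBytes(value, decimals = 0) {
  if (value === 0) return '0 bytes';
  if (value < 1) value *= 1e6; // sub-1 values arrive as MB fractions
  if (isNaN(parseFloat(value)) || !isFinite(value)) return '-';
  const units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
  const i = Math.floor(Math.log(value) / Math.log(1024));
  return (value / Math.pow(1024, i)).toFixed(decimals) + ' ' + units[i];
}

console.log(formatBytes(123456789)); // "118 MB"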
    \n')}]),e.exports=n},function(e,t,n){var r=n(433);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n\n.details-content {\n table-layout: fixed;\n}\n\n.detail-body {\n white-space: nowrap;\n overflow-x: scroll;\n}\n",""])},function(e,t,n){"use strict";const r=n(435),i=n(21);angular.module("dbt").directive("columnDetails",["project",function(e){return{scope:{model:"="},templateUrl:r,link:function(t){t.has_test=function(e,t){return-1!=i.pluck(e.tests,"short").indexOf(t)},t.has_more_info=function(e){var t=e.tests||[],n=e.description||"",r=e.meta||{};return t.length||n.length||!i.isEmpty(r)},t.toggle_column_expanded=function(e){t.has_more_info(e)&&(e.expanded=!e.expanded)},t.getState=function(e){return"dbt."+e.resource_type},t.get_col_name=function(t){return e.caseColumn(t)},t.get_columns=function(e){var t=i.chain(e.columns).values().sortBy("index").value();return i.each(t,(function(e,t){e.index=t})),t}}}}])},function(e,t){var n="/components/column_details/column_details.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
[column_details.html markup stripped in extraction; recoverable labels/bindings: "Column information is not available for this seed", table headers Column / Type / Description / Tests / More?, per-column rows ({{ get_col_name(column.name) }}, {{ column.type }}, {{ column.description }}) with test badges U N F A +, and an expanded panel with "Details" ({{ k }} / {{ v }}), "Description", and "Generic Tests"]
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(437);n(31),n(438);function i(e){return"python"===e?"language-python":"language-sql"}angular.module("dbt").directive("codeBlock",["code","$timeout",function(e,t){return{scope:{versions:"=",default:"<",language:"="},restrict:"E",templateUrl:r,link:function(n,r){n.selected_version=n.default,n.language_class=i(n.language),n.source=null,n.setSelected=function(r){n.selected_version=r,n.source=n.versions[r]||"";const i=n.source.trim();n.highlighted=e.highlight(i,n.language),t((function(){Prism.highlightAll()}))},n.titleCase=function(e){return e.charAt(0).toUpperCase()+e.substring(1)},n.copied=!1,n.copy_to_clipboard=function(){e.copy_to_clipboard(n.source),n.copied=!0,setTimeout((function(){n.$apply((function(){n.copied=!1}))}),1e3)},n.$watch("language",(function(e,t){e&&e!=t&&(n.language_class=i(e))}),!0),n.$watch("versions",(function(e,t){if(e)if(n.default)n.setSelected(n.default);else{var r=Object.keys(n.versions);r.length>0&&n.setSelected(r[0])}}),!0)}}}])},function(e,t){var n="/components/code_block/code_block.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
[code_block.html markup stripped in extraction; recoverable header label: "Code"]
    \n\n')}]),e.exports=n},function(e,t,n){var r=n(439);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"pre.code {\n border: none !important;\n overflow-y: visible !important;\n overflow-x: scroll !important;\n padding-bottom: 10px;\n}\n\npre.code code {\n font-family: Monaco, monospace !important;\n font-weight: 400 !important;\n}\n\n.line-numbers-rows {\n border: none !important;\n}\n",""])},function(e,t,n){"use strict";const r=n(441);angular.module("dbt").directive("macroArguments",[function(){return{scope:{macro:"="},templateUrl:r,link:function(e){_.each(e.macro.arguments,(function(e){e.expanded=!1}))}}}])},function(e,t){var n="/components/macro_arguments/index.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
[macro_arguments/index.html markup stripped in extraction; recoverable labels/bindings: "Details are not available for this macro", table headers Argument / Type / Description / More?, per-argument rows ({{ arg.name }}, {{ arg.type }}, {{ arg.description }}), and an expanded "Description" panel]
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(443);angular.module("dbt").directive("referenceList",["$state",function(e){return{scope:{references:"=",node:"="},restrict:"E",templateUrl:r,link:function(t){t.selected_type=null,t.setType=function(e){t.selected_type=e,t.nodes=t.references[t.selected_type]},t.getNodeUrl=function(t){var n="dbt."+t.resource_type;return e.href(n,{unique_id:t.unique_id,"#":null})},t.mapResourceType=function(e){return"model"==e?"Models":"seed"==e?"Seeds":"test"==e?"Tests":"snapshot"==e?"Snapshots":"analysis"==e?"Analyses":"macro"==e?"Macros":"exposure"==e?"Exposures":"metric"==e?"Metrics":"operation"==e?"Operations":"Nodes"},t.$watch("references",(function(e){e&&_.size(e)>0?(t.selected_type=_.keys(e)[0],t.has_references=!0,t.nodes=t.references[t.selected_type]):t.has_references=!1}))}}}])},function(e,t){var n="/components/references/index.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
[references/index.html markup stripped in extraction; recoverable labels/bindings: "No resources reference this {{ node.resource_type }}" plus the resource-type tabs and node-list markup]
    \n')}]),e.exports=n},function(e,t,n){n(445),n(447),n(448),n(449),n(450),n(451),n(452),n(453),n(454),n(455)},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ModelCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.copied=!1,e.copy_to_clipboard=function(t){r.copy_to_clipboard(t),e.copied=!0,setTimeout((function(){e.$apply((function(){e.copied=!1}))}),1e3)},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.nav-tabs li.nav-pull-right {\n flex: 1 0 auto;\n text-align: right;\n}\n\ntr.column-row-selected {\n\n}\n\ntd.column-expanded{\n padding: 0px !important;\n}\n\ntd.column-expanded > div {\n padding: 5px 10px;\n margin-left: 20px;\n height: 100%;\n\n border-left: 1px solid #ccc !important;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SourceCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.versions={"Sample SQL":r.generateSourceSQL(e.model)},e.extra_table_fields=[{name:"Loader",value:e.model.loader},{name:"Source",value:e.model.source_name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SeedCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.versions={"Example SQL":r.generateSourceSQL(e.model)}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SnapshotCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"Compiled SQL is not available for this snapshot"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("TestCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let 
n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const r=n(9),i=n(21),o=n(33);n(34),r.module("dbt").controller("MacroCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,a,s,l){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.macro={},n.ready((function(t){let n=t.macros[e.model_uid];if(e.macro=n,e.references=o.getMacroReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=o.getMacroParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.macro.is_adapter_macro){var r=t.metadata.adapter_type;e.versions=n.impls,n.impls[r]?e.default_version=r:n.impls.default?e.default_version="default":e.default_version=i.keys(n.impls)[0]}else e.default_version="Source",e.versions={Source:e.macro.macro_sql}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("AnalysisCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.default_version="Source",e.versions={Source:"",Compiled:""},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language,e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ExposureCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.exposure={},n.ready((function(t){let n=t.nodes[e.model_uid];e.exposure=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.language=n.language,e.extra_table_fields=[{name:"Maturity",value:e.exposure.maturity},{name:"Owner",value:e.exposure.owner.name},{name:"Owner email",value:e.exposure.owner.email},{name:"Exposure name",value:e.exposure.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("MetricCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.metric={},n.ready((function(t){let n=t.nodes[e.model_uid];e.metric=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.versions={Definition:r.generateMetricSQL(e.metric)};const o="expression"===e.metric.type?"Expression metric":"Aggregate metric";e.extra_table_fields=[{name:"Metric Type",value:o},{name:"Metric name",value:e.metric.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("OperationCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let 
n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";n(9).module("dbt").controller("GraphCtrl",["$scope","$state","$window","graph","project","selectorService",function(e,t,n,r,i,o){function a(e){return e&&"source"==e.resource_type?"source:"+e.source_name+"."+e.name:e&&"exposure"==e.resource_type?"exposure:"+e.name:e&&"metric"==e.resource_type?"metric:"+e.name:e.name?e.name:"*"}e.graph=r.graph,e.graphService=r,e.graphRendered=function(e){r.setGraphReady(e)},e.$watch((function(){return t.params.unique_id}),(function(e,t){e&&e!=t&&i.find_by_id(e,(function(e){e&&("sidebar"==r.orientation?r.showVerticalGraph(a(e),!1):r.showFullGraph(a(e)))})),e||o.clearViewNode()}))}])},function(e,t,n){"use strict";const r=n(9),i=n(21),o=n(31),a=n(458);n(459),n(206),n(467),n(469),n(472),n(476),r.module("dbt").controller("MainController",["$scope","$route","$state","project","graph","selectorService","trackingService","locationService","$transitions",function(e,t,n,r,s,l,c,u,d){function f(t){e.model_uid=t;var n=r.node(t);n&&l.resetSelection(n)}function p(e){e&&setTimeout((function(){var t=o("*[data-nav-unique-id='"+e+"']");t.length&&t[0].scrollIntoView&&t[0].scrollIntoView({behavior:"smooth",block:"center",inline:"center"})}),1)}e.tree={database:{},project:{},sources:{}},e.search={query:"",results:[],is_focused:!1},e.logo=a,e.model_uid=null,e.project={},o("body").bind("keydown",(function(e){"t"==event.key&&"INPUT"!=event.target.tagName&&(console.log("Opening search"),o("#search").focus(),event.preventDefault())})),e.onSearchFocus=function(t,n){e.search.is_focused=n},e.clearSearch=function(){e.search.is_focused=!1,e.search.query="",e.search.results=[],o("#search").blur()},e.$on("clearSearch",(function(){e.clearSearch()})),e.$on("query",(function(t,n){e.search.is_focused=!0,e.search.query=n})),e.onSearchKeypress=function(t){"Escape"==t.key&&(e.clearSearch(),t.preventDefault())},r.getModelTree(n.params.unique_id,(function(t){e.tree.database=t.database,e.tree.project=t.project,e.tree.sources=t.sources,e.tree.exposures=t.exposures,e.tree.metrics=t.metrics,setTimeout((function(){p(e.model_uid)}))})),d.onSuccess({},(function(t,n){var i=t.router.globals.params,o=l.getViewNode(),a=o?o.unique_id:null,s=i.unique_id,u=!0;if(t.from().name==t.to().name&&a==s&&(u=!1),u&&i.unique_id){var d=r.updateSelected(i.unique_id);e.tree.database=d.database,e.tree.project=d.project,e.tree.sources=d.sources,e.search.query="",console.log("updating selected model to: ",i),f(i.unique_id),setTimeout((function(){p(i.unique_id)}))}u&&c.track_pageview()})),e.$watch("search.query",(function(t){e.search.results=function(t){if(""===e.search.query)return t;let n={name:10,tags:5,description:3,raw_code:2,columns:1};return i.each(t,(function(t){t.overallWeight=0,i.each(Object.keys(n),(function(r){if(null!=t.model[r]){let o=0,a=t.model[r],s=e.search.query.toLowerCase();if("columns"===r)i.each(a,(function(e){if(e.name){let t=e.name.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}}));else if("tags"===r)i.each(a,(function(e){let t=e.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}));else{a=a.toLowerCase();let 
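The search $watch in MainController above scores each result with per-field weights (name 10, tags 5, description 3, raw_code 2, columns 1), counting every occurrence of the query in each field. A standalone sketch of that scoring, with illustrative names:

// Sketch of the relevance weighting used by the sidebar search above.
const FIELD_WEIGHTS = { name: 10, tags: 5, description: 3, raw_code: 2, columns: 1 };

function countOccurrences(haystack, needle) {
  let count = 0;
  let i = haystack.indexOf(needle);
  while (i !== -1) {
    count += 1;
    i = haystack.indexOf(needle, i + 1);
  }
  return count;
}

function scoreModel(model, query) {
  const q = query.toLowerCase();
  let score = 0;
  for (const [field, weight] of Object.entries(FIELD_WEIGHTS)) {
    const value = model[field];
    if (value == null) continue;
    if (field === 'columns') {
      Object.values(value).forEach((col) => {
        if (col.name) score += countOccurrences(col.name.toLowerCase(), q) * weight;
      });
    } else if (field === 'tags') {
      value.forEach((tag) => { score += countOccurrences(tag.toLowerCase(), q) * weight; });
    } else {
      score += countOccurrences(String(value).toLowerCase(), q) * weight;
    }
  }
  return score;
}

console.log(scoreModel({ name: 'orders', tags: ['orders'] }, 'orders')); // 15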
e=0;for(;-1!=e;)e=a.indexOf(s,e),-1!=e&&(o++,e++)}t.overallWeight+=o*n[r]}}))})),t}(r.search(t))})),r.init(),r.ready((function(t){e.project=t,e.search.results=r.search("");var o=i.unique(i.pluck(i.values(t.nodes),"package_name")).sort(),a=[null];i.each(t.nodes,(function(e){var t=e.tags;a=i.union(a,t).sort()})),l.init({packages:o,tags:a}),f(n.params.unique_id);var d=u.parseState(n.params);d.show_graph&&s.ready((function(){i.assign(l.selection.dirty,d.selected);var e=l.updateSelection();s.updateGraph(e)}));var p=t.metadata||{};c.init({track:p.send_anonymous_usage_stats,project_id:p.project_id})}))}])},function(e,t){e.exports="data:image/svg+xml,%3Csvg width='242' height='90' viewBox='0 0 242 90' fill='none' xmlns='http://www.w3.org/2000/svg'%3E %3Cpath d='M240.384 74.5122L239.905 75.8589H239.728L239.249 74.5156V75.8589H238.941V74.0234H239.324L239.816 75.3872L240.309 74.0234H240.691V75.8589H240.384V74.5122ZM238.671 74.3003H238.169V75.8589H237.858V74.3003H237.352V74.0234H238.671V74.3003Z' fill='%23262A38'/%3E %3Cpath d='M154.123 13.915V75.3527H141.672V69.0868C140.37 71.2839 138.499 73.0742 136.22 74.2134C133.779 75.434 131.012 76.085 128.246 76.085C124.828 76.1664 121.41 75.1899 118.562 73.2369C115.633 71.2839 113.354 68.5986 111.889 65.425C110.262 61.7631 109.448 57.8572 109.529 53.8698C109.448 49.8825 110.262 45.9765 111.889 42.3961C113.354 39.3038 115.633 36.6185 118.481 34.7469C121.41 32.8753 124.828 31.9801 128.246 32.0615C130.931 32.0615 133.616 32.6311 135.976 33.8517C138.255 34.991 140.126 36.6999 141.428 38.8156V18.0651L154.123 13.915ZM139.15 63.2279C140.777 61.1121 141.672 58.0199 141.672 54.0326C141.672 50.0452 140.859 47.0344 139.15 44.9187C137.441 42.8029 134.755 41.5823 131.989 41.6637C129.222 41.5009 126.537 42.7215 124.746 44.8373C123.038 46.953 122.142 49.9639 122.142 53.8698C122.142 57.8572 123.038 60.9494 124.746 63.1465C126.455 65.3436 129.222 66.5642 131.989 66.4828C135.081 66.4828 137.522 65.3436 139.15 63.2279Z' fill='%23262A38'/%3E %3Cpath d='M198.635 34.6655C201.564 36.5371 203.843 39.2225 205.226 42.3147C206.853 45.8952 207.667 49.8011 207.586 53.7885C207.667 57.7758 206.853 61.7632 205.226 65.3436C203.761 68.5172 201.483 71.2026 198.553 73.1556C195.705 75.0272 192.287 76.0037 188.87 75.9223C186.103 76.0037 183.336 75.3527 180.895 74.0507C178.617 72.9114 176.745 71.1212 175.524 68.9241V75.2713H162.993V18.0651L175.606 13.915V38.9783C176.826 36.7812 178.698 34.991 180.976 33.8517C183.418 32.5498 186.103 31.8988 188.87 31.9801C192.287 31.8988 195.705 32.8753 198.635 34.6655ZM192.45 63.1465C194.159 60.9494 194.973 57.8572 194.973 53.7885C194.973 49.8825 194.159 46.8716 192.45 44.7559C190.741 42.6402 188.381 41.5823 185.289 41.5823C182.523 41.4196 179.837 42.6402 178.047 44.8373C176.338 47.0344 175.524 50.0452 175.524 53.9512C175.524 57.9386 176.338 61.0308 178.047 63.1465C179.756 65.3436 182.441 66.5642 185.289 66.4015C188.056 66.5642 190.741 65.3436 192.45 63.1465Z' fill='%23262A38'/%3E %3Cpath d='M225 42.4774V58.915C225 61.2749 225.651 62.9838 226.791 64.0416C228.093 65.1809 229.801 65.7505 231.592 65.6691C232.975 65.6691 234.44 65.425 235.742 65.0995V74.8644C233.382 75.6782 230.941 76.085 228.499 76.0037C223.292 76.0037 219.304 74.5389 216.537 71.6094C213.771 68.68 212.387 64.5299 212.387 59.1592V23.1103L225 19.0416V33.038H235.742V42.4774H225Z' fill='%23262A38'/%3E %3Cpath d='M86.1754 3.74322C88.2911 5.77758 89.6745 8.46293 90 11.3924C90 12.613 89.6745 13.4268 88.9421 14.9729C88.2098 16.519 79.1772 32.1429 76.4919 36.4557C74.9458 38.9783 74.132 41.9892 74.132 
44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(460),n(461),n(462),n(463),n(465);const o=n(9),a=(n(31),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")},generateMetricSQL:function(e){if("derived"==e.calculation_method)return"-- derived\n"+e.expression;const t=[`select ${e.calculation_method}(${e.expression})`,`from {{ ${e.model} }}`];if(e.filters.length>0){const n=e.filters.map(e=>`${e.field} ${e.operator} ${e.value}`).join(" AND ");t.push("where "+n)}return t.join("\n")}};return 
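The code service above also generates preview SQL for sources: one select line per column, then the fully qualified relation. A readable sketch of generateSourceSQL:

// Sketch of generateSourceSQL from the code service above.
function generateSourceSQL(model) {
  const cols = Object.keys(model.columns || {});
  const lines = ['select'];
  cols.forEach((name, i) => {
    lines.push('    ' + name + (i + 1 !== cols.length ? ',' : ''));
  });
  const relation =
    (model.database ? model.database + '.' : '') +
    model.schema + '.' + model.identifier;
  lines.push('from ' + relation);
  return lines.join('\n');
}

console.log(generateSourceSQL({
  database: 'analytics', schema: 'raw', identifier: 'orders',
  columns: { id: {}, status: {} },
}));
// select
//     id,
//     status
// from analytics.raw.orders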
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
[hunk omitted: the remainder of this file diff is the bundled dbt-docs single-page app — minified, machine-generated JavaScript and CSS (Prism.js syntax highlighting, the docs graph/selector/tracking services, and vendored md5/base64 helpers) — and is not reviewable as text]
diff --git a/core/dbt/lib.py b/core/dbt/lib.py
index ff8f06c88a8..f4b9ab5be0e 100644
--- a/core/dbt/lib.py
+++ b/core/dbt/lib.py
@@ -1,4 +1,6 @@
 import os
+from dbt.config.project import Project
+from dbt.config.renderer import DbtProjectYamlRenderer
 from dbt.contracts.results import RunningStatus, collect_timing_info
 from dbt.events.functions import fire_event
 from dbt.events.types import NodeCompiling, NodeExecuting
@@ -29,11 +31,10 @@ def compile_and_execute(self, manifest, ctx):
         method. Once conditional credential usage is enabled, this should be
         removed.
         """
         result = None
-        ctx.node._event_status["node_status"] = RunningStatus.Compiling
+        ctx.node.update_event_status(node_status=RunningStatus.Compiling)
         fire_event(
             NodeCompiling(
                 node_info=ctx.node.node_info,
-                unique_id=ctx.node.unique_id,
             )
         )
         with collect_timing_info("compile") as timing_info:
@@ -45,11 +46,10 @@ def compile_and_execute(self, manifest, ctx):

         # for ephemeral nodes, we only want to compile, not run
         if not ctx.node.is_ephemeral_model:
-            ctx.node._event_status["node_status"] = RunningStatus.Executing
+            ctx.node.update_event_status(node_status=RunningStatus.Executing)
             fire_event(
                 NodeExecuting(
                     node_info=ctx.node.node_info,
-                    unique_id=ctx.node.unique_id,
                 )
             )
             with collect_timing_info("execute") as timing_info:
@@ -71,16 +71,22 @@ def get_dbt_config(project_dir, args=None, single_threaded=False):
     else:
         profiles_dir = flags.DEFAULT_PROFILES_DIR

+    profile_name = getattr(args, "profile", None)
+
     runtime_args = RuntimeArgs(
         project_dir=project_dir,
         profiles_dir=profiles_dir,
         single_threaded=single_threaded,
-        profile=getattr(args, "profile", None),
+        profile=profile_name,
         target=getattr(args, "target", None),
     )

-    # Construct a RuntimeConfig from phony args
-    config = RuntimeConfig.from_args(runtime_args)
+    profile = RuntimeConfig.collect_profile(args=runtime_args, profile_name=profile_name)
+    project_renderer = DbtProjectYamlRenderer(profile, None)
+    project = RuntimeConfig.collect_project(args=runtime_args, project_renderer=project_renderer)
+    assert type(project) is Project
+
+    config = RuntimeConfig.from_parts(project, profile, runtime_args)

     # Set global flags from arguments
     flags.set_from_args(args, config)
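For readers following the get_dbt_config() change above: the single RuntimeConfig.from_args() call is replaced by a three-step construction. Below is a minimal sketch of that same sequence, using collect_profile/collect_project/from_parts exactly as the hunk presents them; the import path for RuntimeConfig is an assumption, not confirmed by this patch.

    # Sketch only: mirrors the new config path in get_dbt_config().
    from dbt.config.renderer import DbtProjectYamlRenderer
    from dbt.config.runtime import RuntimeConfig  # assumed location

    def build_runtime_config(runtime_args, profile_name=None):
        # Resolve the profile first; rendering dbt_project.yml can depend on it.
        profile = RuntimeConfig.collect_profile(args=runtime_args, profile_name=profile_name)
        renderer = DbtProjectYamlRenderer(profile, None)
        project = RuntimeConfig.collect_project(args=runtime_args, project_renderer=renderer)
        # Assemble the two validated halves into one RuntimeConfig.
        return RuntimeConfig.from_parts(project, profile, runtime_args)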
""" result = None - ctx.node._event_status["node_status"] = RunningStatus.Compiling + ctx.node.update_event_status(node_status=RunningStatus.Compiling) fire_event( NodeCompiling( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("compile") as timing_info: @@ -45,11 +46,10 @@ def compile_and_execute(self, manifest, ctx): # for ephemeral nodes, we only want to compile, not run if not ctx.node.is_ephemeral_model: - ctx.node._event_status["node_status"] = RunningStatus.Executing + ctx.node.update_event_status(node_status=RunningStatus.Executing) fire_event( NodeExecuting( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("execute") as timing_info: @@ -71,16 +71,22 @@ def get_dbt_config(project_dir, args=None, single_threaded=False): else: profiles_dir = flags.DEFAULT_PROFILES_DIR + profile_name = getattr(args, "profile", None) + runtime_args = RuntimeArgs( project_dir=project_dir, profiles_dir=profiles_dir, single_threaded=single_threaded, - profile=getattr(args, "profile", None), + profile=profile_name, target=getattr(args, "target", None), ) - # Construct a RuntimeConfig from phony args - config = RuntimeConfig.from_args(runtime_args) + profile = RuntimeConfig.collect_profile(args=runtime_args, profile_name=profile_name) + project_renderer = DbtProjectYamlRenderer(profile, None) + project = RuntimeConfig.collect_project(args=runtime_args, project_renderer=project_renderer) + assert type(project) is Project + + config = RuntimeConfig.from_parts(project, profile, runtime_args) # Set global flags from arguments flags.set_from_args(args, config) diff --git a/core/dbt/logger.py b/core/dbt/logger.py index 4bbcfca4c06..0c7ba2fe8f2 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -191,11 +191,6 @@ def process(self, record): record.level = self.target_level -class JsonOnly(logbook.Processor): - def process(self, record): - record.extra["json_only"] = True - - class TextOnly(logbook.Processor): def process(self, record): record.extra["text_only"] = True diff --git a/core/dbt/main.py b/core/dbt/main.py index 24b053d0997..3c23cfec4b3 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -211,7 +211,8 @@ def run_from_args(parsed): if task.config is not None: log_path = getattr(task.config, "log_path", None) log_manager.set_path(log_path) - setup_event_logger(log_path or "logs", "json", False, True) + # WHY WE SET DEBUG TO BE TRUE HERE previously? + setup_event_logger(log_path or "logs", "json", False, False) fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION)) fire_event(MainReportArgs(args=args_to_dict(parsed))) @@ -482,6 +483,20 @@ def _add_defer_argument(*subparsers): ) +def _add_favor_state_argument(*subparsers): + for sub in subparsers: + sub.add_optional_argument_inverse( + "--favor-state", + enable_help=""" + If set, defer to the state variable for resolving unselected nodes, even if node exist as a database object in the current environment. + """, + disable_help=""" + If defer is set, expect standard defer behaviour. 
+ """, + default=flags.FAVOR_STATE_MODE, + ) + + def _build_run_subparser(subparsers, base_subparser): run_sub = subparsers.add_parser( "run", @@ -1072,14 +1087,6 @@ def parse_args(args, cls=DBTArgumentParser): """, ) - p.add_argument( - "--event-buffer-size", - dest="event_buffer_size", - help=""" - Sets the max number of events to buffer in EVENT_HISTORY - """, - ) - p.add_argument( "-q", "--quiet", @@ -1154,6 +1161,8 @@ def parse_args(args, cls=DBTArgumentParser): _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub) # --defer _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) + # --favor-state + _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub) # --full-refresh _add_table_mutability_arguments(run_sub, compile_sub, build_sub) diff --git a/core/dbt/node_types.py b/core/dbt/node_types.py index a6fa5ff4f84..ec7517d2029 100644 --- a/core/dbt/node_types.py +++ b/core/dbt/node_types.py @@ -13,7 +13,7 @@ class NodeType(StrEnum): # TODO: rm? RPCCall = "rpc" SqlOperation = "sql operation" - Documentation = "docs block" + Documentation = "doc" Source = "source" Macro = "macro" Exposure = "exposure" diff --git a/core/dbt/parser/README.md b/core/dbt/parser/README.md index 6ab326c42a6..7e4c208cdf9 100644 --- a/core/dbt/parser/README.md +++ b/core/dbt/parser/README.md @@ -126,17 +126,17 @@ These have executable SQL attached. Models - Are generated from SQL files in the 'models' directory - have a unique_id starting with 'model.' -- Final object is a ParsedModelNode +- Final object is a ModelNode -Data Tests +Singular Tests - Are generated from SQL files in 'tests' directory - have a unique_id starting with 'test.' -- Final object is a ParsedDataTestNode +- Final object is a SingularTestNode -Schema Tests +Generic Tests - Are generated from 'tests' in schema yaml files, which ultimately derive from tests in the 'macros' directory - Have a unique_id starting with 'test.' -- Final object is a ParsedSchemaTestNode +- Final object is a GenericTestNode - fqn is .schema_test. Hooks @@ -146,35 +146,35 @@ Hooks Analysis - comes from SQL files in 'analysis' directory -- Final object is a ParsedAnalysisNode +- Final object is a AnalysisNode RPC Node - This is a "node" representing the bit of Jinja-SQL that gets passed into the run_sql or compile_sql methods. When you're using the Cloud IDE, and you're working in a scratch tab, and you just want to compile/run what you have there: it needs to be parsed and executed, but it's not actually a model/node in the project, so it's this special thing. This is a temporary addition to the running manifest. -- Object is a ParsedRPCNode +- Object is a RPCNode ### sources - comes from 'sources' sections in yaml files -- Final object is a ParsedSourceDefinition node +- Final object is a SourceDefinition node - have a unique_id starting with 'source.' ### macros - comes from SQL files in 'macros' directory -- Final object is a ParsedMacro node +- Final object is a Macro node - have a unique_id starting with 'macro.' 
diff --git a/core/dbt/parser/README.md b/core/dbt/parser/README.md
index 6ab326c42a6..7e4c208cdf9 100644
--- a/core/dbt/parser/README.md
+++ b/core/dbt/parser/README.md
@@ -126,17 +126,17 @@ These have executable SQL attached.
 Models
 - Are generated from SQL files in the 'models' directory
 - have a unique_id starting with 'model.'
-- Final object is a ParsedModelNode
+- Final object is a ModelNode

-Data Tests
+Singular Tests
 - Are generated from SQL files in 'tests' directory
 - have a unique_id starting with 'test.'
-- Final object is a ParsedDataTestNode
+- Final object is a SingularTestNode

-Schema Tests
+Generic Tests
 - Are generated from 'tests' in schema yaml files, which ultimately derive from tests in the 'macros' directory
 - Have a unique_id starting with 'test.'
-- Final object is a ParsedSchemaTestNode
+- Final object is a GenericTestNode
 - fqn is .schema_test.

 Hooks
@@ -146,35 +146,35 @@ Hooks

 Analysis
 - comes from SQL files in 'analysis' directory
-- Final object is a ParsedAnalysisNode
+- Final object is an AnalysisNode

 RPC Node
 - This is a "node" representing the bit of Jinja-SQL that gets passed into the run_sql or compile_sql methods. When you're using the Cloud IDE, and you're working in a scratch tab, and you just want to compile/run what you have there: it needs to be parsed and executed, but it's not actually a model/node in the project, so it's this special thing. This is a temporary addition to the running manifest.
-- Object is a ParsedRPCNode
+- Object is an RPCNode

 ### sources

 - comes from 'sources' sections in yaml files
-- Final object is a ParsedSourceDefinition node
+- Final object is a SourceDefinition node
 - have a unique_id starting with 'source.'

 ### macros

 - comes from SQL files in 'macros' directory
-- Final object is a ParsedMacro node
+- Final object is a Macro node
 - have a unique_id starting with 'macro.'
 - Test macros are used in schema tests

 ### docs

 - comes from .md files in 'docs' directory
-- Final object is a ParsedDocumentation
+- Final object is a Documentation

 ### exposures

 - comes from 'exposures' sections in yaml files
-- Final object is a ParsedExposure node
+- Final object is an Exposure node

 ## Temporary patch files
diff --git a/core/dbt/parser/analysis.py b/core/dbt/parser/analysis.py
index 17eadb8783b..2102a76ac2e 100644
--- a/core/dbt/parser/analysis.py
+++ b/core/dbt/parser/analysis.py
@@ -1,16 +1,16 @@
 import os

-from dbt.contracts.graph.parsed import ParsedAnalysisNode
+from dbt.contracts.graph.nodes import AnalysisNode
 from dbt.node_types import NodeType
 from dbt.parser.base import SimpleSQLParser
 from dbt.parser.search import FileBlock


-class AnalysisParser(SimpleSQLParser[ParsedAnalysisNode]):
-    def parse_from_dict(self, dct, validate=True) -> ParsedAnalysisNode:
+class AnalysisParser(SimpleSQLParser[AnalysisNode]):
+    def parse_from_dict(self, dct, validate=True) -> AnalysisNode:
         if validate:
-            ParsedAnalysisNode.validate(dct)
-        return ParsedAnalysisNode.from_dict(dct)
+            AnalysisNode.validate(dct)
+        return AnalysisNode.from_dict(dct)

     @property
     def resource_type(self) -> NodeType:
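analysis.py above is the template for the renames that follow (docs.py, hooks.py, macros.py, manifest.py, models.py): classes move from dbt.contracts.graph.parsed to dbt.contracts.graph.nodes and drop the Parsed prefix. For downstream code tracking this series, a hypothetical compatibility shim — not part of the patch — would look like:

    # Hypothetical aliases easing the Parsed* -> nodes rename for external code.
    from dbt.contracts.graph.nodes import (
        AnalysisNode as ParsedAnalysisNode,
        ModelNode as ParsedModelNode,
        HookNode as ParsedHookNode,
        Macro as ParsedMacro,
        Documentation as ParsedDocumentation,
    )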
diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py
index 4b9e666a421..9c245214d83 100644
--- a/core/dbt/parser/base.py
+++ b/core/dbt/parser/base.py
@@ -16,9 +16,9 @@
 from dbt.config import Project, RuntimeConfig
 from dbt.context.context_config import ContextConfig
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import HasUniqueID, ManifestNodes
+from dbt.contracts.graph.nodes import ManifestNode, BaseNode
 from dbt.contracts.graph.unparsed import UnparsedNode, Docs
-from dbt.exceptions import ParsingException, validator_error_message, InternalException
+from dbt.exceptions import InternalException, InvalidConfigUpdate, InvalidDictParse
 from dbt import hooks
 from dbt.node_types import NodeType, ModelLanguage
 from dbt.parser.search import FileBlock
@@ -26,11 +26,11 @@
 # internally, the parser may store a less-restrictive type that will be
 # transformed into the final type. But it will have to be derived from
 # ParsedNode to be operable.
-FinalValue = TypeVar("FinalValue", bound=HasUniqueID)
-IntermediateValue = TypeVar("IntermediateValue", bound=HasUniqueID)
+FinalValue = TypeVar("FinalValue", bound=BaseNode)
+IntermediateValue = TypeVar("IntermediateValue", bound=BaseNode)

 IntermediateNode = TypeVar("IntermediateNode", bound=Any)
-FinalNode = TypeVar("FinalNode", bound=ManifestNodes)
+FinalNode = TypeVar("FinalNode", bound=ManifestNode)


 ConfiguredBlockType = TypeVar("ConfiguredBlockType", bound=FileBlock)
@@ -169,7 +169,6 @@ def _create_error_node(
             resource_type=self.resource_type,
             path=path,
             original_file_path=original_file_path,
-            root_path=self.project.project_root,
             package_name=self.project.project_name,
             raw_code=raw_code,
             language=language,
@@ -192,6 +191,7 @@ def _create_parsetime_node(
             name = block.name

         if block.path.relative_path.endswith(".py"):
             language = ModelLanguage.python
+            config.add_config_call({"materialized": "table"})
         else:
             # this is not ideal but we have a lot of tests to adjust if don't do it
             language = ModelLanguage.sql
@@ -202,7 +202,6 @@ def _create_parsetime_node(
             "database": self.default_database,
             "fqn": fqn,
             "name": name,
-            "root_path": self.project.project_root,
             "resource_type": self.resource_type,
             "path": path,
             "original_file_path": block.path.original_file_path,
@@ -217,7 +216,6 @@ def _create_parsetime_node(
         try:
             return self.parse_from_dict(dct, validate=True)
         except ValidationError as exc:
-            msg = validator_error_message(exc)
             # this is a bit silly, but build an UnparsedNode just for error
             # message reasons
             node = self._create_error_node(
@@ -226,7 +224,7 @@ def _create_parsetime_node(
                 original_file_path=block.path.original_file_path,
                 raw_code=block.contents,
             )
-            raise ParsingException(msg, node=node)
+            raise InvalidDictParse(exc, node=node)

     def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]:
         return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config)
@@ -255,12 +253,13 @@ def update_parsed_node_config_dict(
             self._mangle_hooks(final_config_dict)
         parsed_node.config = parsed_node.config.from_dict(final_config_dict)

-    def update_parsed_node_name(
+    def update_parsed_node_relation_names(
         self, parsed_node: IntermediateNode, config_dict: Dict[str, Any]
     ) -> None:
         self._update_node_database(parsed_node, config_dict)
         self._update_node_schema(parsed_node, config_dict)
         self._update_node_alias(parsed_node, config_dict)
+        self._update_node_relation_name(parsed_node)

     def update_parsed_node_config(
         self,
@@ -319,7 +318,7 @@ def update_parsed_node_config(
         # parsed_node.config is what it would be if they did nothing
         self.update_parsed_node_config_dict(parsed_node, config_dict)
         # This updates the node database/schema/alias
-        self.update_parsed_node_name(parsed_node, config_dict)
+        self.update_parsed_node_relation_names(parsed_node, config_dict)

         # tests don't have hooks
         if parsed_node.resource_type == NodeType.Test:
@@ -364,10 +363,9 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None:
             self.update_parsed_node_config(node, config, context=context)
         except ValidationError as exc:
             # we got a ValidationError - probably bad types in config()
-            msg = validator_error_message(exc)
-            raise ParsingException(msg, node=node) from exc
+            raise InvalidConfigUpdate(exc, node=node) from exc

-    def add_result_node(self, block: FileBlock, node: ManifestNodes):
+    def add_result_node(self, block: FileBlock, node: ManifestNode):
         if node.config.enabled:
             self.manifest.add_node(block.file, node)
         else:
@@ -390,6 +388,19 @@ def parse_node(self, block: ConfiguredBlockType) -> FinalNode:
         self.add_result_node(block, result)
         return result

+    def _update_node_relation_name(self, node: ManifestNode):
+        # Seed and Snapshot nodes and Models that are not ephemeral,
+        # and TestNodes that store_failures.
+        # TestNodes do not get a relation_name without store failures
+        # because no schema is created.
+        if node.is_relational and not node.is_ephemeral_model:
+            adapter = get_adapter(self.root_project)
+            relation_cls = adapter.Relation
+            node.relation_name = str(relation_cls.create_from(self.root_project, node))
+        else:
+            # Set it to None in case it changed with a config update
+            node.relation_name = None
+
     @abc.abstractmethod
     def parse_file(self, file_block: FileBlock) -> None:
         pass
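The new _update_node_relation_name() above is what finally stamps relation_name onto parsed nodes. Condensed to its core, under the same conditions the hunk encodes (the get_adapter import path is an assumption; is_relational/is_ephemeral_model and Relation.create_from are taken from the diff):

    # Sketch: how a node's relation_name is derived per the hunk above.
    from dbt.adapters.factory import get_adapter  # assumed location

    def relation_name_for(config, node):
        # Relational, non-ephemeral nodes get a rendered relation name such
        # as "database"."schema"."alias"; everything else gets None.
        if node.is_relational and not node.is_ephemeral_model:
            relation_cls = get_adapter(config).Relation
            return str(relation_cls.create_from(config, node))
        return None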
- return "{}.{}".format(self.project.project_name, resource_name) + # For consistency, use the same format for doc unique_ids + return f"doc.{self.project.project_name}.{resource_name}" - def parse_block(self, block: BlockContents) -> Iterable[ParsedDocumentation]: + def parse_block(self, block: BlockContents) -> Iterable[Documentation]: unique_id = self.generate_unique_id(block.name) contents = get_rendered(block.contents, {}).strip() - doc = ParsedDocumentation( - root_path=self.project.project_root, + doc = Documentation( path=block.file.path.relative_path, original_file_path=block.path.original_file_path, package_name=self.project.project_name, unique_id=unique_id, name=block.name, block_contents=contents, + resource_type=NodeType.Documentation, ) return [doc] diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index 4706119585b..822dd5b2d85 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -4,9 +4,8 @@ from dbt.exceptions import ParsingException from dbt.clients import jinja -from dbt.contracts.graph.parsed import ParsedGenericTestNode +from dbt.contracts.graph.nodes import GenericTestNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro from dbt.contracts.files import SourceFile from dbt.events.functions import fire_event from dbt.events.types import GenericTestFileParse @@ -14,9 +13,10 @@ from dbt.parser.base import BaseParser from dbt.parser.search import FileBlock from dbt.utils import MACRO_PREFIX +from dbt import flags -class GenericTestParser(BaseParser[ParsedGenericTestNode]): +class GenericTestParser(BaseParser[GenericTestNode]): @property def resource_type(self) -> NodeType: return NodeType.Macro @@ -27,21 +27,20 @@ def get_compiled_path(cls, block: FileBlock): def parse_generic_test( self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + ) -> Macro: unique_id = self.generate_unique_id(name) - return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, package_name=base_node.package_name, - root_path=base_node.root_path, resource_type=base_node.resource_type, name=name, unique_id=unique_id, ) - def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t @@ -88,7 +87,8 @@ def parse_file(self, block: FileBlock): source_file = block.file assert isinstance(source_file.contents, str) original_file_path = source_file.path.original_file_path - fire_event(GenericTestFileParse(path=original_file_path)) + if flags.MACRO_DEBUGGING: + fire_event(GenericTestFileParse(path=original_file_path)) # this is really only used for error messages base_node = UnparsedMacro( @@ -96,7 +96,6 @@ def parse_file(self, block: FileBlock): original_file_path=original_file_path, package_name=self.project.project_name, raw_code=source_file.contents, - root_path=self.project.project_root, resource_type=NodeType.Macro, language="sql", ) diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index 3dfb541cb8f..af0282c953f 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -13,7 +13,7 @@ ) from dbt.clients.jinja import get_rendered, GENERIC_TEST_KWARGS_NAME -from dbt.contracts.graph.parsed import UnpatchedSourceDefinition +from 
dbt.contracts.graph.nodes import UnpatchedSourceDefinition from dbt.contracts.graph.unparsed import ( TestDef, UnparsedAnalysisUpdate, @@ -21,7 +21,19 @@ UnparsedNodeUpdate, UnparsedExposure, ) -from dbt.exceptions import raise_compiler_error, raise_parsing_error, UndefinedMacroException +from dbt.exceptions import ( + CustomMacroPopulatingConfigValues, + SameKeyNested, + TagNotString, + TagsNotListOfStrings, + TestArgIncludesModel, + TestArgsNotDict, + TestDefinitionDictLength, + TestInvalidType, + TestNameNotString, + UnexpectedTestNamePattern, + UndefinedMacroException, +) from dbt.parser.search import FileBlock @@ -222,9 +234,7 @@ def __init__( test_name, test_args = self.extract_test_args(test, column_name) self.args: Dict[str, Any] = test_args if "model" in self.args: - raise_compiler_error( - 'Test arguments include "model", which is a reserved argument', - ) + raise TestArgIncludesModel() self.package_name: str = package_name self.target: Testable = target @@ -232,9 +242,7 @@ def __init__( match = self.TEST_NAME_PATTERN.match(test_name) if match is None: - raise_compiler_error( - "Test name string did not match expected pattern: {}".format(test_name) - ) + raise UnexpectedTestNamePattern(test_name) groups = match.groupdict() self.name: str = groups["test_name"] @@ -251,9 +259,7 @@ def __init__( value = self.args.pop(key, None) # 'modifier' config could be either top level arg or in config if value and "config" in self.args and key in self.args["config"]: - raise_compiler_error( - "Test cannot have the same key at the top-level and in config" - ) + raise SameKeyNested() if not value and "config" in self.args: value = self.args["config"].pop(key, None) if isinstance(value, str): @@ -261,22 +267,12 @@ def __init__( try: value = get_rendered(value, render_ctx, native=True) except UndefinedMacroException as e: - - # Generic tests do not include custom macros in the Jinja - # rendering context, so this will almost always fail. As it - # currently stands, the error message is inscrutable, which - # has caused issues for some projects migrating from - # pre-0.20.0 to post-0.20.0. - # See https://github.com/dbt-labs/dbt-core/issues/4103 - # and https://github.com/dbt-labs/dbt-core/issues/5294 - raise_compiler_error( - f"The {self.target.name}.{column_name} column's " - f'"{self.name}" test references an undefined ' - f"macro in its {key} configuration argument. 
" - f"The macro {e.msg}.\n" - "Please note that the generic test configuration parser " - "currently does not support using custom macros to " - "populate configuration values" + raise CustomMacroPopulatingConfigValues( + target_name=self.target.name, + column_name=column_name, + name=self.name, + key=key, + err_msg=e.msg ) if value is not None: @@ -314,9 +310,7 @@ def _bad_type(self) -> TypeError: @staticmethod def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: if not isinstance(test, dict): - raise_parsing_error( - "test must be dict or str, got {} (value {})".format(type(test), test) - ) + raise TestInvalidType(test) # If the test is a dictionary with top-level keys, the test name is "test_name" # and the rest are arguments @@ -330,20 +324,13 @@ def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: else: test = list(test.items()) if len(test) != 1: - raise_parsing_error( - "test definition dictionary must have exactly one key, got" - " {} instead ({} keys)".format(test, len(test)) - ) + raise TestDefinitionDictLength(test) test_name, test_args = test[0] if not isinstance(test_args, dict): - raise_parsing_error( - "test arguments must be dict, got {} (value {})".format(type(test_args), test_args) - ) + raise TestArgsNotDict(test_args) if not isinstance(test_name, str): - raise_parsing_error( - "test name must be a str, got {} (value {})".format(type(test_name), test_name) - ) + raise TestNameNotString(test_name) test_args = deepcopy(test_args) if name is not None: test_args["column_name"] = name @@ -434,12 +421,10 @@ def tags(self) -> List[str]: if isinstance(tags, str): tags = [tags] if not isinstance(tags, list): - raise_compiler_error( - f"got {tags} ({type(tags)}) for tags, expected a list of strings" - ) + raise TagsNotListOfStrings(tags) for tag in tags: if not isinstance(tag, str): - raise_compiler_error(f"got {tag} ({type(tag)}) for tag, expected a str") + raise TagNotString(tag) return tags[:] def macro_name(self) -> str: diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py index 2ac8bfda0ef..d05ea136dc5 100644 --- a/core/dbt/parser/hooks.py +++ b/core/dbt/parser/hooks.py @@ -3,7 +3,7 @@ from dbt.context.context_config import ContextConfig from dbt.contracts.files import FilePath -from dbt.contracts.graph.parsed import ParsedHookNode +from dbt.contracts.graph.nodes import HookNode from dbt.exceptions import InternalException from dbt.node_types import NodeType, RunHookType from dbt.parser.base import SimpleParser @@ -65,7 +65,7 @@ def __iter__(self) -> Iterator[HookBlock]: ) -class HookParser(SimpleParser[HookBlock, ParsedHookNode]): +class HookParser(SimpleParser[HookBlock, HookNode]): def transform(self, node): return node @@ -81,10 +81,10 @@ def get_path(self) -> FilePath: ) return path - def parse_from_dict(self, dct, validate=True) -> ParsedHookNode: + def parse_from_dict(self, dct, validate=True) -> HookNode: if validate: - ParsedHookNode.validate(dct) - return ParsedHookNode.from_dict(dct) + HookNode.validate(dct) + return HookNode.from_dict(dct) @classmethod def get_compiled_path(cls, block: HookBlock): @@ -98,7 +98,7 @@ def _create_parsetime_node( fqn: List[str], name=None, **kwargs, - ) -> ParsedHookNode: + ) -> HookNode: return super()._create_parsetime_node( block=block, diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 4fe6b422595..7c5336b8ccf 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -4,7 +4,7 @@ from dbt.clients import jinja from dbt.contracts.graph.unparsed 
import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.contracts.files import FilePath, SourceFile from dbt.exceptions import ParsingException from dbt.events.functions import fire_event @@ -13,9 +13,10 @@ from dbt.parser.base import BaseParser from dbt.parser.search import FileBlock, filesystem_search from dbt.utils import MACRO_PREFIX +from dbt import flags -class MacroParser(BaseParser[ParsedMacro]): +class MacroParser(BaseParser[Macro]): # This is only used when creating a MacroManifest separate # from the normal parsing flow. def get_paths(self) -> List[FilePath]: @@ -31,23 +32,20 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def parse_macro( - self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + def parse_macro(self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str) -> Macro: unique_id = self.generate_unique_id(name) - return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, package_name=base_node.package_name, - root_path=base_node.root_path, resource_type=base_node.resource_type, name=name, unique_id=unique_id, ) - def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t @@ -95,7 +93,8 @@ def parse_file(self, block: FileBlock): source_file = block.file assert isinstance(source_file.contents, str) original_file_path = source_file.path.original_file_path - fire_event(MacroFileParse(path=original_file_path)) + if flags.MACRO_DEBUGGING: + fire_event(MacroFileParse(path=original_file_path)) # this is really only used for error messages base_node = UnparsedMacro( @@ -103,7 +102,6 @@ def parse_file(self, block: FileBlock): original_file_path=original_file_path, package_name=self.project.project_name, raw_code=source_file.contents, - root_path=self.project.project_root, resource_type=NodeType.Macro, language="sql", ) diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 29f93b5bae2..787b70cfeaf 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -18,7 +18,7 @@ get_adapter_package_names, ) from dbt.helper_types import PathSet -from dbt.events.functions import fire_event, get_invocation_id +from dbt.events.functions import fire_event, get_invocation_id, warn_or_error from dbt.events.types import ( PartialParsingFullReparseBecauseOfError, PartialParsingExceptionFile, @@ -35,10 +35,10 @@ PartialParsingNotEnabled, ParsedFileLoadFailed, PartialParseSaveFileNotFound, - InvalidDisabledSourceInTestNode, - InvalidRefInTestNode, + InvalidDisabledTargetInTestNode, PartialParsingProjectEnvVarsChanged, PartialParsingProfileEnvVarsChanged, + NodeNotFoundOrDisabled, ) from dbt.logger import DbtProcessState from dbt.node_types import NodeType @@ -53,7 +53,6 @@ from dbt.contracts.files import FileHash, ParseFileType, SchemaSourceFile from dbt.parser.read_files import read_files, load_source_file from dbt.parser.partial import PartialParsing, special_override_macros -from dbt.contracts.graph.compiled import ManifestNode from dbt.contracts.graph.manifest import ( Manifest, Disabled, @@ -61,22 +60,18 @@ ManifestStateCheck, ParsingInfo, ) -from dbt.contracts.graph.parsed import ( - ParsedSourceDefinition, - ParsedNode, - ParsedMacro, +from 
dbt.contracts.graph.nodes import ( + SourceDefinition, + Macro, ColumnInfo, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, + SeedNode, + ManifestNode, + ResultNode, ) from dbt.contracts.util import Writable -from dbt.exceptions import ( - ref_target_not_found, - get_target_not_found_or_disabled_msg, - target_not_found, - get_not_found_or_disabled_msg, - warn_or_error, -) +from dbt.exceptions import TargetNotFound, AmbiguousAlias from dbt.parser.base import Parser from dbt.parser.analysis import AnalysisParser from dbt.parser.generic_test import GenericTestParser @@ -90,7 +85,6 @@ from dbt.parser.seeds import SeedParser from dbt.parser.snapshots import SnapshotParser from dbt.parser.sources import SourcePatcher -from dbt.ui import warning_tag from dbt.version import __version__ from dbt.dataclass_schema import StrEnum, dbtClassMixin @@ -371,7 +365,7 @@ def load(self): self._perf_info.parse_project_elapsed = time.perf_counter() - start_parse_projects # patch_sources converts the UnparsedSourceDefinitions in the - # Manifest.sources to ParsedSourceDefinition via 'patch_source' + # Manifest.sources to SourceDefinition via 'patch_source' # in SourcePatcher start_patch = time.perf_counter() patcher = SourcePatcher(self.root_project, self.manifest) @@ -542,7 +536,9 @@ def macro_depends_on(self): macro.depends_on.add_macro(dep_macro_id) # will check for dupes def write_manifest_for_partial_parse(self): - path = os.path.join(self.root_project.target_path, PARTIAL_PARSE_FILE_NAME) + path = os.path.join( + self.root_project.project_root, self.root_project.target_path, PARTIAL_PARSE_FILE_NAME + ) try: # This shouldn't be necessary, but we have gotten bug reports (#3757) of the # saved manifest not matching the code version. @@ -708,7 +704,7 @@ def build_manifest_state_check(self): vars_hash = FileHash.from_contents( "\x00".join( [ - getattr(config.args, "vars", "{}") or "{}", + str(getattr(config.args, "vars", "{}") or "{}"), getattr(config.args, "profile", "") or "", getattr(config.args, "target", "") or "", __version__, @@ -856,6 +852,10 @@ def process_metrics(self, config: RuntimeConfig): if metric.created_at < self.started_at: continue _process_metrics_for_node(self.manifest, current_project, metric) + for exposure in self.manifest.exposures.values(): + if exposure.created_at < self.started_at: + continue + _process_metrics_for_node(self.manifest, current_project, exposure) # nodes: node and column descriptions # sources: source and table descriptions, column descriptions @@ -920,7 +920,7 @@ def process_sources(self, current_project: str): for node in self.manifest.nodes.values(): if node.resource_type == NodeType.Source: continue - assert not isinstance(node, ParsedSourceDefinition) + assert not isinstance(node, SourceDefinition) if node.created_at < self.started_at: continue _process_sources_for_node(self.manifest, current_project, node) @@ -955,65 +955,43 @@ def process_nodes(self): self.manifest.rebuild_ref_lookup() -def invalid_ref_fail_unless_test(node, target_model_name, target_model_package, disabled): - - if node.resource_type == NodeType.Test: - msg = get_target_not_found_or_disabled_msg( - node=node, - target_name=target_model_name, - target_package=target_model_package, - disabled=disabled, - ) - if disabled: - fire_event(InvalidRefInTestNode(msg=msg)) - else: - warn_or_error(msg, log_fmt=warning_tag("{}")) - else: - ref_target_not_found( - node, - target_model_name, - target_model_package, - disabled=disabled, - ) - - -def invalid_source_fail_unless_test(node, target_name, 
target_table_name, disabled): +def invalid_target_fail_unless_test( + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, +): if node.resource_type == NodeType.Test: - msg = get_not_found_or_disabled_msg( - node=node, - target_name=f"{target_name}.{target_table_name}", - target_kind="source", - disabled=disabled, - ) if disabled: - fire_event(InvalidDisabledSourceInTestNode(msg=msg)) + fire_event( + InvalidDisabledTargetInTestNode( + resource_type_title=node.resource_type.title(), + unique_id=node.unique_id, + original_file_path=node.original_file_path, + target_kind=target_kind, + target_name=target_name, + target_package=target_package if target_package else "", + ) + ) else: - warn_or_error(msg, log_fmt=warning_tag("{}")) - else: - target_not_found( - node=node, - target_name=f"{target_name}.{target_table_name}", - target_kind="source", - disabled=disabled, - ) - - -def invalid_metric_fail_unless_test(node, target_metric_name, target_metric_package, disabled): - - if node.resource_type == NodeType.Test: - msg = get_target_not_found_or_disabled_msg( - node=node, - target_name=target_metric_name, - target_package=target_metric_package, - disabled=disabled, - ) - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error( + NodeNotFoundOrDisabled( + original_file_path=node.original_file_path, + unique_id=node.unique_id, + resource_type_title=node.resource_type.title(), + target_name=target_name, + target_kind=target_kind, + target_package=target_package if target_package else "", + disabled=str(disabled), + ) + ) else: - target_not_found( + raise TargetNotFound( node=node, - target_name=target_metric_name, - target_kind="metric", - target_package=target_metric_package, + target_name=target_name, + target_kind=target_kind, + target_package=target_package, disabled=disabled, ) @@ -1037,11 +1015,11 @@ def _check_resource_uniqueness( existing_node = names_resources.get(name) if existing_node is not None: - dbt.exceptions.raise_duplicate_resource_name(existing_node, node) + raise dbt.exceptions.DuplicateResourceName(existing_node, node) existing_alias = alias_resources.get(full_node_name) if existing_alias is not None: - dbt.exceptions.raise_ambiguous_alias(existing_alias, node, full_node_name) + raise AmbiguousAlias(node_1=existing_alias, node_2=node, duped_name=full_node_name) names_resources[name] = node alias_resources[full_node_name] = node @@ -1061,7 +1039,7 @@ def _check_manifest(manifest: Manifest, config: RuntimeConfig) -> None: def _get_node_column(node, column_name): - """Given a ParsedNode, add some fields that might be missing. Return a + """Given a ManifestNode, add some fields that might be missing. Return a reference to the dict that refers to the given column, creating it if it doesn't yet exist. 
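Both GenericTestFileParse and MacroFileParse are now gated behind flags.MACRO_DEBUGGING (see the two parse_file hunks above), so per-file macro parse events fire only when macro debugging is enabled. The pattern, isolated as a sketch (names taken from the diff):

    # Sketch: per-file macro parse events are suppressed unless debugging is on.
    from dbt import flags
    from dbt.events.functions import fire_event
    from dbt.events.types import MacroFileParse

    def maybe_fire_macro_parse_event(original_file_path):
        if flags.MACRO_DEBUGGING:
            fire_event(MacroFileParse(path=original_file_path))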
""" @@ -1074,7 +1052,7 @@ def _get_node_column(node, column_name): return column -DocsContextCallback = Callable[[Union[ParsedNode, ParsedSourceDefinition]], Dict[str, Any]] +DocsContextCallback = Callable[[ResultNode], Dict[str, Any]] # node and column descriptions @@ -1090,7 +1068,7 @@ def _process_docs_for_node( # source and table descriptions, column descriptions def _process_docs_for_source( context: Dict[str, Any], - source: ParsedSourceDefinition, + source: SourceDefinition, ): table_description = source.description source_description = source.source_description @@ -1106,27 +1084,22 @@ def _process_docs_for_source( # macro argument descriptions -def _process_docs_for_macro(context: Dict[str, Any], macro: ParsedMacro) -> None: +def _process_docs_for_macro(context: Dict[str, Any], macro: Macro) -> None: macro.description = get_rendered(macro.description, context) for arg in macro.arguments: arg.description = get_rendered(arg.description, context) # exposure descriptions -def _process_docs_for_exposure(context: Dict[str, Any], exposure: ParsedExposure) -> None: +def _process_docs_for_exposure(context: Dict[str, Any], exposure: Exposure) -> None: exposure.description = get_rendered(exposure.description, context) -def _process_docs_for_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: +def _process_docs_for_metrics(context: Dict[str, Any], metric: Metric) -> None: metric.description = get_rendered(metric.description, context) -# TODO: this isn't actually referenced anywhere? -def _process_derived_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: - metric.description = get_rendered(metric.description, context) - - -def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: ParsedExposure): +def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): """Given a manifest and exposure in that manifest, process its refs""" for ref in exposure.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1153,10 +1126,11 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur # This may raise. Even if it doesn't, we don't want to add # this exposure to the graph b/c there is no destination exposure exposure.config.enabled = False - invalid_ref_fail_unless_test( - exposure, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=exposure, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) @@ -1168,7 +1142,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur manifest.update_exposure(exposure) -def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): +def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: Metric): """Given a manifest and a metric in that manifest, process its refs""" for ref in metric.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1195,13 +1169,13 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P # This may raise. 
Even if it doesn't, we don't want to add # this metric to the graph b/c there is no destination metric metric.config.enabled = False - invalid_ref_fail_unless_test( - metric, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=metric, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) - continue target_model_id = target_model.unique_id @@ -1211,11 +1185,17 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P def _process_metrics_for_node( - manifest: Manifest, current_project: str, node: Union[ManifestNode, ParsedMetric] + manifest: Manifest, + current_project: str, + node: Union[ManifestNode, Metric, Exposure], ): """Given a manifest and a node in that manifest, process its metrics""" + + if isinstance(node, SeedNode): + return + for metric in node.metrics: - target_metric: Optional[Union[Disabled, ParsedMetric]] = None + target_metric: Optional[Union[Disabled, Metric]] = None target_metric_name: str target_metric_package: Optional[str] = None @@ -1239,13 +1219,13 @@ def _process_metrics_for_node( # This may raise. Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False - invalid_metric_fail_unless_test( - node, - target_metric_name, - target_metric_package, + invalid_target_fail_unless_test( + node=node, + target_name=target_metric_name, + target_kind="source", + target_package=target_metric_package, disabled=(isinstance(target_metric, Disabled)), ) - continue target_metric_id = target_metric.unique_id @@ -1255,6 +1235,10 @@ def _process_metrics_for_node( def _process_refs_for_node(manifest: Manifest, current_project: str, node: ManifestNode): """Given a manifest and a node in that manifest, process its refs""" + + if isinstance(node, SeedNode): + return + for ref in node.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None target_model_name: str @@ -1280,13 +1264,13 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif # This may raise. 
Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False - invalid_ref_fail_unless_test( - node, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=node, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) - continue target_model_id = target_model.unique_id @@ -1299,10 +1283,8 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif manifest.update_node(node) -def _process_sources_for_exposure( - manifest: Manifest, current_project: str, exposure: ParsedExposure -): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in exposure.sources: target_source = manifest.resolve_source( source_name, @@ -1312,8 +1294,11 @@ def _process_sources_for_exposure( ) if target_source is None or isinstance(target_source, Disabled): exposure.config.enabled = False - invalid_source_fail_unless_test( - exposure, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=exposure, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1321,8 +1306,8 @@ def _process_sources_for_exposure( manifest.update_exposure(exposure) -def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: Metric): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in metric.sources: target_source = manifest.resolve_source( source_name, @@ -1332,8 +1317,11 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric ) if target_source is None or isinstance(target_source, Disabled): metric.config.enabled = False - invalid_source_fail_unless_test( - metric, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=metric, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1342,7 +1330,11 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric def _process_sources_for_node(manifest: Manifest, current_project: str, node: ManifestNode): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None + + if isinstance(node, SeedNode): + return + + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in node.sources: target_source = manifest.resolve_source( source_name, @@ -1354,8 +1346,11 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma if target_source is None or isinstance(target_source, Disabled): # this folows the same pattern as refs node.config.enabled = False - invalid_source_fail_unless_test( - node, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=node, + target_name=f"{source_name}.{table_name}", + 
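The three special-cased helpers (invalid_ref_fail_unless_test, invalid_source_fail_unless_test, invalid_metric_fail_unless_test) collapse here into a single invalid_target_fail_unless_test keyed by target_kind. A minimal sketch of the consolidated shape, inferred only from the call sites in this diff (the real helper lives in dbt.exceptions and its wording and error types may differ):

    # Hedged sketch: signature inferred from the call sites above, not
    # copied from dbt.exceptions. dbt warns for test nodes and errors
    # otherwise; plain print/RuntimeError stand in for that machinery.
    from typing import Optional

    def invalid_target_fail_unless_test(
        node,
        target_name: str,
        target_kind: str,
        target_package: Optional[str] = None,
        disabled: bool = False,
    ) -> None:
        state = "disabled" if disabled else "not found"
        package_hint = f" in package '{target_package}'" if target_package else ""
        message = (
            f"{node.unique_id} depends on a {target_kind} named "
            f"'{target_name}'{package_hint} which is {state}"
        )
        if getattr(node, "resource_type", None) == "test":
            print(f"WARNING: {message}")  # tests only warn on a missing target
        else:
            raise RuntimeError(message)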
@@ -1365,7 +1360,7 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma

 # This is called in task.rpc.sql_commands when a "dynamic" node is
 # created in the manifest, in 'add_refs'
-def process_macro(config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro) -> None:
+def process_macro(config: RuntimeConfig, manifest: Manifest, macro: Macro) -> None:
     ctx = generate_runtime_docs_context(
         config,
         macro,
diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py
index aaf6a0d016e..39bb18be714 100644
--- a/core/dbt/parser/models.py
+++ b/core/dbt/parser/models.py
@@ -1,6 +1,6 @@
 from copy import deepcopy
 from dbt.context.context_config import ContextConfig
-from dbt.contracts.graph.parsed import ParsedModelNode
+from dbt.contracts.graph.nodes import ModelNode
 import dbt.flags as flags
 from dbt.events.functions import fire_event
 from dbt.events.types import (
@@ -29,8 +29,13 @@
 # New for Python models :p
 import ast
 from dbt.dataclass_schema import ValidationError
-from dbt.exceptions import ParsingException, validator_error_message, UndefinedMacroException
-
+from dbt.exceptions import (
+    InvalidModelConfig,
+    ParsingException,
+    PythonLiteralEval,
+    PythonParsingException,
+    UndefinedMacroException,
+)

 dbt_function_key_words = set(["ref", "source", "config", "get"])
 dbt_function_full_names = set(["dbt.ref", "dbt.source", "dbt.config", "dbt.config.get"])
@@ -61,7 +66,11 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> None:

     def check_error(self, node):
         if self.num_model_def != 1:
-            raise ParsingException("dbt only allow one model defined per python file", node=node)
+            raise ParsingException(
+                f"dbt allows exactly one model defined per python file, found {self.num_model_def}",
+                node=node,
+            )
+
         if len(self.dbt_errors) != 0:
             raise ParsingException("\n".join(self.dbt_errors), node=node)

@@ -87,12 +96,7 @@ def _safe_eval(self, node):
         try:
             return ast.literal_eval(node)
         except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc:
-            msg = validator_error_message(
-                f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n"
-                "https://docs.python.org/3/library/ast.html#ast.literal_eval\n"
-                "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures"
-            )
-            raise ParsingException(msg, node=self.dbt_node) from exc
+            raise PythonLiteralEval(exc, node=self.dbt_node) from exc

     def _get_call_literals(self, node):
         # List of literals
@@ -177,11 +181,11 @@ def verify_python_model_code(node):
         raise ParsingException("No jinja in python model code is allowed", node=node)


-class ModelParser(SimpleSQLParser[ParsedModelNode]):
-    def parse_from_dict(self, dct, validate=True) -> ParsedModelNode:
+class ModelParser(SimpleSQLParser[ModelNode]):
+    def parse_from_dict(self, dct, validate=True) -> ModelNode:
         if validate:
-            ParsedModelNode.validate(dct)
-        return ParsedModelNode.from_dict(dct)
+            ModelNode.validate(dct)
+        return ModelNode.from_dict(dct)

     @property
     def resource_type(self) -> NodeType:
@@ -192,32 +196,54 @@ def get_compiled_path(cls, block: FileBlock):
         return block.path.relative_path

     def parse_python_model(self, node, config, context):
+        config_keys_used = []
+        config_keys_defaults = []
+
         try:
             tree = ast.parse(node.raw_code, filename=node.original_file_path)
         except SyntaxError as exc:
-            msg = validator_error_message(exc)
-            raise ParsingException(f"{msg}\n{exc.text}", node=node) from exc
-
-        # We are doing a validator and a parser because visit_FunctionDef in parser
-        # would actually make the parser not doing the visit_Calls any more
-        dbtValidator = PythonValidationVisitor()
-        dbtValidator.visit(tree)
-        dbtValidator.check_error(node)
-
-        dbtParser = PythonParseVisitor(node)
-        dbtParser.visit(tree)
-        config_keys_used = []
-        for (func, args, kwargs) in dbtParser.dbt_function_calls:
-            if func == "get":
-                config_keys_used.append(args[0])
-                continue
+            raise PythonParsingException(exc, node=node) from exc
+
+        # Only parse if AST tree has instructions in body
+        if tree.body:
+            # We run a validator and a parser separately because visit_FunctionDef
+            # in the parser would otherwise keep it from visiting the Call nodes
+            dbt_validator = PythonValidationVisitor()
+            dbt_validator.visit(tree)
+            dbt_validator.check_error(node)
+
+            dbt_parser = PythonParseVisitor(node)
+            dbt_parser.visit(tree)
+
+            for (func, args, kwargs) in dbt_parser.dbt_function_calls:
+                if func == "get":
+                    num_args = len(args)
+                    if num_args == 0:
+                        raise ParsingException(
+                            "dbt.config.get() requires at least one argument",
+                            node=node,
+                        )
+                    if num_args > 2:
+                        raise ParsingException(
+                            f"dbt.config.get() takes at most 2 arguments ({num_args} given)",
+                            node=node,
+                        )
+                    key = args[0]
+                    default_value = args[1] if num_args == 2 else None
+                    config_keys_used.append(key)
+                    config_keys_defaults.append(default_value)
+                    continue
+
+                context[func](*args, **kwargs)

-            context[func](*args, **kwargs)
         if config_keys_used:
             # this is being used in macro build_config_dict
-            context["config"](config_keys_used=config_keys_used)
+            context["config"](
+                config_keys_used=config_keys_used,
+                config_keys_defaults=config_keys_defaults,
+            )

-    def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None:
+    def render_update(self, node: ModelNode, config: ContextConfig) -> None:
         self.manifest._parsing_info.static_analysis_path_count += 1

         if node.language == ModelLanguage.python:
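parse_python_model now validates dbt.config.get() arity and records the default value alongside each key. An illustrative Python model (all names made up) showing what gets captured at parse time:

    # Illustrative model: parsing this file records
    # config_keys_used=["materialized", "retries"] and
    # config_keys_defaults=[None, 3] for the build_config_dict macro.
    def model(dbt, session):
        materialized = dbt.config.get("materialized")  # one arg: default is None
        retries = dbt.config.get("retries", 3)         # two args: default captured
        df = dbt.ref("upstream_model")
        return df

Zero arguments or more than two now fail fast with a ParsingException instead of surfacing later as an opaque IndexError.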
@@ -229,8 +255,7 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:

             except ValidationError as exc:
                 # we got a ValidationError - probably bad types in config()
-                msg = validator_error_message(exc)
-                raise ParsingException(msg, node=node) from exc
+                raise InvalidModelConfig(exc, node=node) from exc
             return

         elif not flags.STATIC_PARSER:
@@ -262,9 +287,9 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:
         # top-level declaration of variables
         statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None
         experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None
-        exp_sample_node: Optional[ParsedModelNode] = None
+        exp_sample_node: Optional[ModelNode] = None
         exp_sample_config: Optional[ContextConfig] = None
-        jinja_sample_node: Optional[ParsedModelNode] = None
+        jinja_sample_node: Optional[ModelNode] = None
         jinja_sample_config: Optional[ContextConfig] = None
         result: List[str] = []
@@ -365,9 +390,7 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:
                 }
             )

-    def run_static_parser(
-        self, node: ParsedModelNode
-    ) -> Optional[Union[str, Dict[str, List[Any]]]]:
+    def run_static_parser(self, node: ModelNode) -> Optional[Union[str, Dict[str, List[Any]]]]:
         # if any banned macros have been overridden by the user, we cannot use the static parser.
         if self._has_banned_macro(node):
             # this log line is used for integration testing. If you change
@@ -389,7 +412,7 @@ def run_static_parser(
         return "cannot_parse"

     def run_experimental_parser(
-        self, node: ParsedModelNode
+        self, node: ModelNode
     ) -> Optional[Union[str, Dict[str, List[Any]]]]:
         # if any banned macros have been overridden by the user, we cannot use the static parser.
         if self._has_banned_macro(node):
@@ -415,7 +438,7 @@ def run_experimental_parser(
         return "cannot_parse"

     # checks for banned macros
-    def _has_banned_macro(self, node: ParsedModelNode) -> bool:
+    def _has_banned_macro(self, node: ModelNode) -> bool:
         # first check if there is a banned macro defined in scope for this model file
         root_project_name = self.root_project.project_name
         project_name = node.package_name
@@ -435,9 +458,7 @@ def _has_banned_macro(self, node: ModelNode) -> bool:
     # this method updates the model node rendered and unrendered config as well
     # as the node object. Used to populate these values when circumventing jinja
     # rendering like the static parser.
-    def populate(
-        self, node: ParsedModelNode, config: ContextConfig, statically_parsed: Dict[str, Any]
-    ):
+    def populate(self, node: ModelNode, config: ContextConfig, statically_parsed: Dict[str, Any]):
         # manually fit configs in
         config._config_call_dict = _get_config_call_dict(statically_parsed)
@@ -485,9 +506,9 @@ def _shift_sources(static_parser_result: Dict[str, List[Any]]) -> Dict[str, List

 # returns a list of string codes to be sent as a tracking event
 def _get_exp_sample_result(
-    sample_node: ParsedModelNode,
+    sample_node: ModelNode,
     sample_config: ContextConfig,
-    node: ParsedModelNode,
+    node: ModelNode,
     config: ContextConfig,
 ) -> List[str]:
     result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config)
@@ -501,9 +522,9 @@ def process(codemsg):

 # returns a list of string codes to be sent as a tracking event
 def _get_stable_sample_result(
-    sample_node: ParsedModelNode,
+    sample_node: ModelNode,
     sample_config: ContextConfig,
-    node: ParsedModelNode,
+    node: ModelNode,
     config: ContextConfig,
 ) -> List[str]:
     result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config)
@@ -518,9 +539,9 @@ def process(codemsg):
 # returns a list of string codes that need a single digit prefix to be prepended
 # before being sent as a tracking event
 def _get_sample_result(
-    sample_node: ParsedModelNode,
+    sample_node: ModelNode,
     sample_config: ContextConfig,
-    node: ParsedModelNode,
+    node: ModelNode,
     config: ContextConfig,
 ) -> List[Tuple[int, str]]:
     result: List[Tuple[int, str]] = []
diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py
index 1a8c7e8193e..63ef33429c4 100644
--- a/core/dbt/parser/partial.py
+++ b/core/dbt/parser/partial.py
@@ -873,7 +873,7 @@ def delete_schema_source(self, schema_file, source_dict):
         source_name = source_dict["name"]
         # There may be multiple sources for each source dict, since
         # there will be a separate source node for each table.
-        # ParsedSourceDefinition name = table name, dict name is source_name
+        # SourceDefinition name = table name, dict name is source_name
         sources = schema_file.sources.copy()
         for unique_id in sources:
             if unique_id in self.saved_manifest.sources:
diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py
index 8b22427cb39..b5fd8558889 100644
--- a/core/dbt/parser/schemas.py
+++ b/core/dbt/parser/schemas.py
@@ -27,14 +27,14 @@
 from dbt.context.macro_resolver import MacroResolver
 from dbt.contracts.files import FileHash, SchemaSourceFile
 from dbt.contracts.graph.model_config import MetricConfig, ExposureConfig
-from dbt.contracts.graph.parsed import (
+from dbt.contracts.graph.nodes import (
     ParsedNodePatch,
     ColumnInfo,
-    ParsedGenericTestNode,
+    GenericTestNode,
     ParsedMacroPatch,
     UnpatchedSourceDefinition,
-    ParsedExposure,
-    ParsedMetric,
+    Exposure,
+    Metric,
 )
 from dbt.contracts.graph.unparsed import (
     HasColumnDocs,
@@ -50,19 +50,25 @@
     UnparsedSourceDefinition,
 )
 from dbt.exceptions import (
-    warn_invalid_patch,
-    validator_error_message,
+    CompilationException,
+    DuplicateMacroPatchName,
+    DuplicatePatchPath,
+    DuplicateSourcePatchName,
     JSONValidationException,
-    raise_invalid_property_yml_version,
-    ValidationException,
-    ParsingException,
-    raise_duplicate_patch_name,
-    raise_duplicate_macro_patch_name,
     InternalException,
-    raise_duplicate_source_patch_name,
-    warn_or_error,
-    CompilationException,
+    InvalidSchemaConfig,
+    InvalidTestConfig,
+    ParsingException,
+    PropertyYMLInvalidTag,
+    PropertyYMLMissingVersion,
+    PropertyYMLVersionNotInt,
+    ValidationException,
+    YamlLoadFailure,
+    YamlParseDictFailure,
+    YamlParseListFailure,
 )
+from dbt.events.functions import warn_or_error
+from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound
 from dbt.node_types import NodeType
 from dbt.parser.base import SimpleParser
 from dbt.parser.search import FileBlock
@@ -74,7 +80,6 @@
     TestBlock,
     Testable,
 )
-from dbt.ui import warning_tag
 from dbt.utils import get_pseudo_test_path, coerce_dict_str


@@ -92,34 +97,13 @@
 )


-def error_context(
-    path: str,
-    key: str,
-    data: Any,
-    cause: Union[str, ValidationException, JSONValidationException],
-) -> str:
-    """Provide contextual information about an error while parsing"""
-    if isinstance(cause, str):
-        reason = cause
-    elif isinstance(cause, ValidationError):
-        reason = validator_error_message(cause)
-    else:
-        reason = cause.msg
-    return "Invalid {key} config given in {path} @ {key}: {data} - {reason}".format(
-        key=key, path=path, data=data, reason=reason
-    )
-
-
 def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]:
     """If loading the yaml fails, raise an exception."""
     path = source_file.path.relative_path
     try:
         return load_yaml_text(source_file.contents, source_file.path)
     except ValidationException as e:
-        reason = validator_error_message(e)
-        raise ParsingException(
-            "Error reading {}: {} - {}".format(source_file.project_name, path, reason)
-        )
+        raise YamlLoadFailure(source_file.project_name, path, e)


 class ParserRef:
@@ -169,7 +153,7 @@ def _trimmed(inp: str) -> str:
     return inp[:44] + "..." + inp[-3:]


-class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]):
+class SchemaParser(SimpleParser[GenericTestBlock, GenericTestNode]):
     def __init__(
         self,
         project,
@@ -196,10 +180,10 @@ def get_compiled_path(cls, block: FileBlock) -> str:
     def resource_type(self) -> NodeType:
         return NodeType.Test

-    def parse_from_dict(self, dct, validate=True) -> ParsedGenericTestNode:
+    def parse_from_dict(self, dct, validate=True) -> GenericTestNode:
         if validate:
-            ParsedGenericTestNode.validate(dct)
-        return ParsedGenericTestNode.from_dict(dct)
+            GenericTestNode.validate(dct)
+        return GenericTestNode.from_dict(dct)

     def parse_column_tests(self, block: TestBlock, column: UnparsedColumn) -> None:
         if not column.tests:
@@ -220,7 +204,7 @@ def create_test_node(
         test_metadata: Dict[str, Any],
         file_key_name: str,
         column_name: Optional[str],
-    ) -> ParsedGenericTestNode:
+    ) -> GenericTestNode:

         HASH_LENGTH = 10

@@ -245,7 +229,6 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List
             "database": self.default_database,
             "fqn": fqn,
             "name": name,
-            "root_path": self.project.project_root,
             "resource_type": self.resource_type,
             "tags": tags,
             "path": path,
@@ -261,10 +244,9 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List
             "file_key_name": file_key_name,
         }
         try:
-            ParsedGenericTestNode.validate(dct)
-            return ParsedGenericTestNode.from_dict(dct)
+            GenericTestNode.validate(dct)
+            return GenericTestNode.from_dict(dct)
         except ValidationError as exc:
-            msg = validator_error_message(exc)
             # this is a bit silly, but build an UnparsedNode just for error
             # message reasons
             node = self._create_error_node(
@@ -273,7 +255,7 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List
                 original_file_path=target.original_file_path,
                 raw_code=raw_code,
             )
-            raise ParsingException(msg, node=node) from exc
+            raise InvalidTestConfig(exc, node)

     # lots of time spent in this method
     def _parse_generic_test(
@@ -283,7 +265,7 @@ def _parse_generic_test(
         tags: List[str],
         column_name: Optional[str],
         schema_file_id: str,
-    ) -> ParsedGenericTestNode:
+    ) -> GenericTestNode:
         try:
             builder = TestBuilder(
                 test=test,
@@ -415,10 +397,9 @@ def render_test_update(self, node, config, builder, schema_file_id):
                 # env_vars should have been updated in the context env_var method
             except ValidationError as exc:
                 # we got a ValidationError - probably bad types in config()
-                msg = validator_error_message(exc)
-                raise ParsingException(msg, node=node) from exc
+                raise InvalidSchemaConfig(exc, node=node) from exc

-    def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode:
+    def parse_node(self, block: GenericTestBlock) -> GenericTestNode:
         """In schema parsing, we rewrite most of the part of parse_node that
         builds the initial node to be parsed, but rendering is basically the
         same
@@ -433,7 +414,7 @@ def parse_node(self, block: GenericTestBlock) -> GenericTestNode:
         self.add_test_node(block, node)
         return node

-    def add_test_node(self, block: GenericTestBlock, node: ParsedGenericTestNode):
+    def add_test_node(self, block: GenericTestBlock, node: GenericTestNode):
         test_from = {"key": block.target.yaml_key, "name": block.target.name}
         if node.config.enabled:
             self.manifest.add_node(block.file, node, test_from)
@@ -442,7 +423,7 @@ def add_test_node(self, block: GenericTestBlock, node: GenericTestNode):

     def render_with_context(
         self,
-        node: ParsedGenericTestNode,
+        node: GenericTestNode,
         config: ContextConfig,
     ) -> None:
         """Given the parsed node and a ContextConfig to use during
@@ -556,25 +537,16 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None:

 def check_format_version(file_path, yaml_dct) -> None:
     if "version" not in yaml_dct:
-        raise_invalid_property_yml_version(
-            file_path,
-            "the yml property file {} is missing a version tag".format(file_path),
-        )
+        raise PropertyYMLMissingVersion(file_path)
     version = yaml_dct["version"]
     # if it's not an integer, the version is malformed, or not
     # set. Either way, only 'version: 2' is supported.
     if not isinstance(version, int):
-        raise_invalid_property_yml_version(
-            file_path,
-            "its 'version:' tag must be an integer (e.g. version: 2)."
-            " {} is not an integer".format(version),
-        )
+        raise PropertyYMLVersionNotInt(file_path, version)
+
     if version != 2:
-        raise_invalid_property_yml_version(
-            file_path,
-            "its 'version:' tag is set to {}. Only 2 is supported".format(version),
-        )
+        raise PropertyYMLInvalidTag(file_path, version)


 Parsed = TypeVar("Parsed", UnpatchedSourceDefinition, ParsedNodePatch, ParsedMacroPatch)
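The hand-built messages passed to raise_invalid_property_yml_version give way to three dedicated exception types; the gate itself is unchanged. Assuming the constructors shown above, each malformed 'version' header maps to one exception:

    # Hedged sketch of the resulting behavior (paths and dicts illustrative).
    check_format_version("models/schema.yml", {"version": 2})    # ok, returns None
    check_format_version("models/schema.yml", {})                # PropertyYMLMissingVersion
    check_format_version("models/schema.yml", {"version": "2"})  # PropertyYMLVersionNotInt
    check_format_version("models/schema.yml", {"version": 1})    # PropertyYMLInvalidTag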
@@ -635,8 +607,9 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]:
             # check that entry is a dict and that all dict values
             # are strings
             if coerce_dict_str(entry) is None:
-                msg = error_context(path, self.key, data, "expected a dict with string keys")
-                raise ParsingException(msg)
+                raise YamlParseListFailure(
+                    path, self.key, data, "expected a dict with string keys"
+                )

             if "name" not in entry:
                 raise ParsingException("Entry did not contain a name")
@@ -683,8 +656,7 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T:
             cls.validate(data)
             return cls.from_dict(data)
         except (ValidationError, JSONValidationException) as exc:
-            msg = error_context(path, self.key, data, exc)
-            raise ParsingException(msg) from exc
+            raise YamlParseDictFailure(path, self.key, data, exc)

     # The other parse method returns TestBlocks. This one doesn't.
     # This takes the yaml dictionaries in 'sources' keys and uses them
@@ -705,7 +677,7 @@ def parse(self) -> List[TestBlock]:
                 # source patches must be unique
                 key = (patch.overrides, patch.name)
                 if key in self.manifest.source_patches:
-                    raise_duplicate_source_patch_name(patch, self.manifest.source_patches[key])
+                    raise DuplicateSourcePatchName(patch, self.manifest.source_patches[key])
                 self.manifest.source_patches[key] = patch
                 source_file.source_patches.append(key)
             else:
@@ -729,11 +701,11 @@ def add_source_definitions(self, source: UnparsedSourceDefinition) -> None:
                 table=table,
                 path=original_file_path,
                 original_file_path=original_file_path,
-                root_path=self.project.project_root,
                 package_name=package_name,
                 unique_id=unique_id,
                 resource_type=NodeType.Source,
                 fqn=fqn,
+                name=f"{source.name}_{table.name}",
             )
             self.manifest.add_source(self.yaml.file, source_def)

@@ -809,8 +781,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]:
                 self.normalize_docs_attribute(data, path)
                 node = self._target_type().from_dict(data)
             except (ValidationError, JSONValidationException) as exc:
-                msg = error_context(path, self.key, data, exc)
-                raise ParsingException(msg) from exc
+                raise YamlParseDictFailure(path, self.key, data, exc)
             else:
                 yield node

@@ -873,7 +844,15 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None:
         if unique_id:
             resource_type = NodeType(unique_id.split(".")[0])
             if resource_type.pluralize() != patch.yaml_key:
-                warn_invalid_patch(patch, resource_type)
+                warn_or_error(
+                    WrongResourceSchemaFile(
+                        patch_name=patch.name,
+                        resource_type=resource_type,
+                        plural_resource_type=resource_type.pluralize(),
+                        yaml_key=patch.yaml_key,
+                        file_path=patch.original_file_path,
+                    )
+                )
                 return

         elif patch.yaml_key == "analyses":
@@ -912,12 +891,13 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None:
                 node.patch(patch)
         else:
-            msg = (
-                f"Did not find matching node for patch with name '{patch.name}' "
-                f"in the '{patch.yaml_key}' section of "
-                f"file '{source_file.path.original_file_path}'"
+            warn_or_error(
+                NoNodeForYamlKey(
+                    patch_name=patch.name,
+                    yaml_key=patch.yaml_key,
+                    file_path=source_file.path.original_file_path,
+                )
             )
-            warn_or_error(msg, log_fmt=warning_tag("{}"))
             return

         # patches can't be overwritten
@@ -925,7 +905,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None:
         if node:
             if node.patch_path:
                 package_name, existing_file_path = node.patch_path.split("://")
-                raise_duplicate_patch_name(patch, existing_file_path)
+                raise DuplicatePatchPath(patch, existing_file_path)
             source_file.append_patch(patch.yaml_key, node.unique_id)
             # re-calculate the node config with the patch config.  Always do this
@@ -977,12 +957,11 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef)
         unique_id = f"macro.{patch.package_name}.{patch.name}"
         macro = self.manifest.macros.get(unique_id)
         if not macro:
-            msg = f'Found patch for macro "{patch.name}" ' f"which was not found"
-            warn_or_error(msg, log_fmt=warning_tag("{}"))
+            warn_or_error(MacroPatchNotFound(patch_name=patch.name))
             return
         if macro.patch_path:
             package_name, existing_file_path = macro.patch_path.split("://")
-            raise_duplicate_macro_patch_name(patch, existing_file_path)
+            raise DuplicateMacroPatchName(patch, existing_file_path)
         source_file.macro_patches[patch.name] = unique_id
         macro.patch(patch)

@@ -1022,9 +1001,9 @@ def parse_exposure(self, unparsed: UnparsedExposure):
                 f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig"
             )

-        parsed = ParsedExposure(
+        parsed = Exposure(
+            resource_type=NodeType.Exposure,
             package_name=package_name,
-            root_path=self.project.project_root,
             path=path,
             original_file_path=self.yaml.path.original_file_path,
             unique_id=unique_id,
@@ -1049,7 +1028,7 @@ def parse_exposure(self, unparsed: UnparsedExposure):
         )
         depends_on_jinja = "\n".join("{{ " + line + "}}" for line in unparsed.depends_on)
         get_rendered(depends_on_jinja, ctx, parsed, capture_macros=True)
-        # parsed now has a populated refs/sources
+        # parsed now has a populated refs/sources/metrics

         if parsed.config.enabled:
             self.manifest.add_exposure(self.yaml.file, parsed)
@@ -1085,8 +1064,7 @@ def parse(self):
                 UnparsedExposure.validate(data)
                 unparsed = UnparsedExposure.from_dict(data)
             except (ValidationError, JSONValidationException) as exc:
-                msg = error_context(self.yaml.path, self.key, data, exc)
-                raise ParsingException(msg) from exc
+                raise YamlParseDictFailure(self.yaml.path, self.key, data, exc)
             self.parse_exposure(unparsed)

@@ -1126,9 +1104,9 @@ def parse_metric(self, unparsed: UnparsedMetric):
                 f"Calculated a {type(config)} for a metric, but expected a MetricConfig"
             )

-        parsed = ParsedMetric(
+        parsed = Metric(
+            resource_type=NodeType.Metric,
             package_name=package_name,
-            root_path=self.project.project_root,
             path=path,
             original_file_path=self.yaml.path.original_file_path,
             unique_id=unique_id,
@@ -1203,6 +1181,5 @@ def parse(self):
                 unparsed = UnparsedMetric.from_dict(data)
             except (ValidationError, JSONValidationException) as exc:
-                msg = error_context(self.yaml.path, self.key, data, exc)
-                raise ParsingException(msg) from exc
+                raise YamlParseDictFailure(self.yaml.path, self.key, data, exc)
             self.parse_metric(unparsed)
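Patch warnings now flow through typed events rather than pre-rendered strings wrapped in warning_tag, so warn_or_error can decide centrally whether to warn or raise under --warn-error. An illustrative call with made-up values (field names come from the diff above):

    # Illustrative: under --warn-error, warn_or_error promotes this typed
    # event to an exception instead of printing a tagged string.
    warn_or_error(
        NoNodeForYamlKey(
            patch_name="my_model",
            yaml_key="models",
            file_path="models/schema.yml",
        )
    )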
diff --git a/core/dbt/parser/seeds.py b/core/dbt/parser/seeds.py
index 63550e3f30f..23c77e1ed7c 100644
--- a/core/dbt/parser/seeds.py
+++ b/core/dbt/parser/seeds.py
@@ -1,15 +1,20 @@
 from dbt.context.context_config import ContextConfig
-from dbt.contracts.graph.parsed import ParsedSeedNode
+from dbt.contracts.graph.nodes import SeedNode
 from dbt.node_types import NodeType
 from dbt.parser.base import SimpleSQLParser
 from dbt.parser.search import FileBlock


-class SeedParser(SimpleSQLParser[ParsedSeedNode]):
-    def parse_from_dict(self, dct, validate=True) -> ParsedSeedNode:
+class SeedParser(SimpleSQLParser[SeedNode]):
+    def parse_from_dict(self, dct, validate=True) -> SeedNode:
+        # seeds need the root_path because the contents are not loaded
+        dct["root_path"] = self.project.project_root
+        if "language" in dct:
+            del dct["language"]
+        # raw_code is not currently used, but it might be in the future
         if validate:
-            ParsedSeedNode.validate(dct)
-        return ParsedSeedNode.from_dict(dct)
+            SeedNode.validate(dct)
+        return SeedNode.from_dict(dct)

     @property
     def resource_type(self) -> NodeType:
@@ -19,5 +24,5 @@ def resource_type(self) -> NodeType:
     def get_compiled_path(cls, block: FileBlock):
         return block.path.relative_path

-    def render_with_context(self, parsed_node: ParsedSeedNode, config: ContextConfig) -> None:
+    def render_with_context(self, parsed_node: SeedNode, config: ContextConfig) -> None:
         """Seeds don't need to do any rendering."""
diff --git a/core/dbt/parser/singular_test.py b/core/dbt/parser/singular_test.py
index 22d203a8ebc..fbb3c8ce8fa 100644
--- a/core/dbt/parser/singular_test.py
+++ b/core/dbt/parser/singular_test.py
@@ -1,15 +1,15 @@
-from dbt.contracts.graph.parsed import ParsedSingularTestNode
+from dbt.contracts.graph.nodes import SingularTestNode
 from dbt.node_types import NodeType
 from dbt.parser.base import SimpleSQLParser
 from dbt.parser.search import FileBlock
 from dbt.utils import get_pseudo_test_path


-class SingularTestParser(SimpleSQLParser[ParsedSingularTestNode]):
-    def parse_from_dict(self, dct, validate=True) -> ParsedSingularTestNode:
+class SingularTestParser(SimpleSQLParser[SingularTestNode]):
+    def parse_from_dict(self, dct, validate=True) -> SingularTestNode:
         if validate:
-            ParsedSingularTestNode.validate(dct)
-        return ParsedSingularTestNode.from_dict(dct)
+            SingularTestNode.validate(dct)
+        return SingularTestNode.from_dict(dct)

     @property
     def resource_type(self) -> NodeType:
diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py
index 71e7bba955f..dffc7d90641 100644
--- a/core/dbt/parser/snapshots.py
+++ b/core/dbt/parser/snapshots.py
@@ -3,15 +3,15 @@

 from dbt.dataclass_schema import ValidationError

-from dbt.contracts.graph.parsed import IntermediateSnapshotNode, ParsedSnapshotNode
-from dbt.exceptions import ParsingException, validator_error_message
+from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode
+from dbt.exceptions import InvalidSnapshopConfig
 from dbt.node_types import NodeType
 from dbt.parser.base import SQLParser
 from dbt.parser.search import BlockContents, BlockSearcher, FileBlock
 from dbt.utils import split_path


-class SnapshotParser(SQLParser[IntermediateSnapshotNode, ParsedSnapshotNode]):
+class SnapshotParser(SQLParser[IntermediateSnapshotNode, SnapshotNode]):
     def parse_from_dict(self, dct, validate=True) -> IntermediateSnapshotNode:
         if validate:
             IntermediateSnapshotNode.validate(dct)
@@ -38,6 +38,8 @@ def set_snapshot_attributes(self, node):
         # the target schema must be set if we got here, so overwrite the node's
         # schema
         node.schema = node.config.target_schema
+        # We need to set relation_name again, since database/schema might have changed
+        self._update_node_relation_name(node)

         return node

@@ -53,7 +55,7 @@ def get_fqn(self, path: str, name: str) -> List[str]:
         fqn.append(name)
         return fqn

-    def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode:
+    def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode:
         try:
             # The config_call_dict is not serialized, because normally
             # it is not needed after parsing. But since the snapshot node
             # will be fed back in to the parsing code, we need to stick
             # the model config when there is also schema config.
             config_call_dict = node.config_call_dict
             dct = node.to_dict(omit_none=True)
-            parsed_node = ParsedSnapshotNode.from_dict(dct)
+            parsed_node = SnapshotNode.from_dict(dct)
             parsed_node.config_call_dict = config_call_dict
             self.set_snapshot_attributes(parsed_node)
             return parsed_node
         except ValidationError as exc:
-            raise ParsingException(validator_error_message(exc), node)
+            raise InvalidSnapshopConfig(exc, node)

     def parse_file(self, file_block: FileBlock) -> None:
         blocks = BlockSearcher(
diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py
index 1c55281db56..cc9acea98c3 100644
--- a/core/dbt/parser/sources.py
+++ b/core/dbt/parser/sources.py
@@ -1,6 +1,6 @@
 import itertools
 from pathlib import Path
-from typing import Iterable, Dict, Optional, Set, Any
+from typing import Iterable, Dict, Optional, Set, Any, List
 from dbt.adapters.factory import get_adapter
 from dbt.config import RuntimeConfig
 from dbt.context.context_config import (
@@ -10,10 +10,10 @@
 )
 from dbt.contracts.graph.manifest import Manifest, SourceKey
 from dbt.contracts.graph.model_config import SourceConfig
-from dbt.contracts.graph.parsed import (
+from dbt.contracts.graph.nodes import (
     UnpatchedSourceDefinition,
-    ParsedSourceDefinition,
-    ParsedGenericTestNode,
+    SourceDefinition,
+    GenericTestNode,
 )
 from dbt.contracts.graph.unparsed import (
     UnparsedSourceDefinition,
@@ -24,11 +24,12 @@
     UnparsedColumn,
     Time,
 )
-from dbt.exceptions import warn_or_error, InternalException
+from dbt.events.functions import warn_or_error
+from dbt.events.types import UnusedTables
+from dbt.exceptions import InternalException
 from dbt.node_types import NodeType
 from dbt.parser.schemas import SchemaParser, ParserRef
-from dbt import ui


 # An UnparsedSourceDefinition is taken directly from the yaml
@@ -37,7 +38,7 @@
 # generate multiple UnpatchedSourceDefinition nodes (one per
 # table) in the SourceParser.add_source_definitions. The
 # SourcePatcher takes an UnparsedSourceDefinition and the
-# SourcePatch and produces a ParsedSourceDefinition. Each
+# SourcePatch and produces a SourceDefinition. Each
 # SourcePatch can be applied to multiple UnpatchedSourceDefinitions.
 class SourcePatcher:
     def __init__(
@@ -49,16 +50,16 @@ def __init__(
         self.manifest = manifest
         self.schema_parsers: Dict[str, SchemaParser] = {}
         self.patches_used: Dict[SourceKey, Set[str]] = {}
-        self.sources: Dict[str, ParsedSourceDefinition] = {}
+        self.sources: Dict[str, SourceDefinition] = {}

     # This method calls the 'parse_source' method which takes
     # the UnpatchedSourceDefinitions in the manifest and combines them
-    # with SourcePatches to produce ParsedSourceDefinitions.
+    # with SourcePatches to produce SourceDefinitions.
     def construct_sources(self) -> None:
         for unique_id, unpatched in self.manifest.sources.items():
             schema_file = self.manifest.files[unpatched.file_id]
-            if isinstance(unpatched, ParsedSourceDefinition):
-                # In partial parsing, there will be ParsedSourceDefinitions
+            if isinstance(unpatched, SourceDefinition):
+                # In partial parsing, there will be SourceDefinitions
                 # which must be retained.
                 self.sources[unpatched.unique_id] = unpatched
                 continue
@@ -79,7 +80,7 @@ def construct_sources(self) -> None:
                     test_from = {"key": "sources", "name": patched.source.name}
                     schema_file.add_test(test.unique_id, test_from)

-            # Convert UnpatchedSourceDefinition to a ParsedSourceDefinition
+            # Convert UnpatchedSourceDefinition to a SourceDefinition
             parsed = self.parse_source(patched)
             if parsed.config.enabled:
                 self.sources[unique_id] = parsed
@@ -117,8 +118,8 @@ def patch_source(
             table = UnparsedSourceTableDefinition.from_dict(table_dct)
         return unpatched.replace(source=source, table=table, patch_path=patch_path)

-    # This converts an UnpatchedSourceDefinition to a ParsedSourceDefinition
-    def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinition:
+    # This converts an UnpatchedSourceDefinition to a SourceDefinition
+    def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition:
         source = target.source
         table = target.table
         refs = ParserRef.from_target(table)
@@ -155,12 +156,11 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit

         default_database = self.root_project.credentials.database

-        parsed_source = ParsedSourceDefinition(
+        parsed_source = SourceDefinition(
             package_name=target.package_name,
             database=(source.database or default_database),
             schema=(source.schema or source.name),
             identifier=(table.identifier or table.name),
-            root_path=target.root_path,
             path=target.path,
             original_file_path=target.original_file_path,
             columns=refs.column_info,
@@ -201,9 +201,7 @@ def get_schema_parser_for(self, package_name: str) -> "SchemaParser":
             self.schema_parsers[package_name] = schema_parser
         return schema_parser

-    def get_source_tests(
-        self, target: UnpatchedSourceDefinition
-    ) -> Iterable[ParsedGenericTestNode]:
+    def get_source_tests(self, target: UnpatchedSourceDefinition) -> Iterable[GenericTestNode]:
         for test, column in target.get_tests():
             yield self.parse_source_test(
                 target=target,
@@ -215,7 +213,7 @@ def get_patch_for(
         self,
         unpatched: UnpatchedSourceDefinition,
     ) -> Optional[SourcePatch]:
-        if isinstance(unpatched, ParsedSourceDefinition):
+        if isinstance(unpatched, SourceDefinition):
             return None
         key = (unpatched.package_name, unpatched.source.name)
         patch: Optional[SourcePatch] = self.manifest.source_patches.get(key)
@@ -234,7 +232,7 @@ def parse_source_test(
         target: UnpatchedSourceDefinition,
         test: Dict[str, Any],
         column: Optional[UnparsedColumn],
-    ) -> ParsedGenericTestNode:
+    ) -> GenericTestNode:
         column_name: Optional[str]
         if column is None:
             column_name = None
@@ -286,7 +284,7 @@ def _generate_source_config(self, target: UnpatchedSourceDefinition, rendered: b
             patch_config_dict=precedence_configs,
         )

-    def _get_relation_name(self, node: ParsedSourceDefinition):
+    def _get_relation_name(self, node: SourceDefinition):
         adapter = get_adapter(self.root_project)
         relation_cls = adapter.Relation
         return str(relation_cls.create_from(self.root_project, node))
@@ -307,28 +305,27 @@ def warn_unused(self) -> None:
                 unused_tables[key] = unused

         if unused_tables:
-            msg = self.get_unused_msg(unused_tables)
-            warn_or_error(msg, log_fmt=ui.warning_tag("{}"))
+            unused_tables_formatted = self.get_unused_msg(unused_tables)
+            warn_or_error(UnusedTables(unused_tables=unused_tables_formatted))

         self.manifest.source_patches = {}

     def get_unused_msg(
         self,
         unused_tables: Dict[SourceKey, Optional[Set[str]]],
-    ) -> str:
-        msg = [
-            "During parsing, dbt encountered source overrides that had no target:",
-        ]
+    ) -> List:
+        unused_tables_formatted = []
         for key, table_names in unused_tables.items():
             patch = self.manifest.source_patches[key]
             patch_name = f"{patch.overrides}.{patch.name}"
             if table_names is None:
-                msg.append(f" - Source {patch_name} (in {patch.path})")
+                unused_tables_formatted.append(f" - Source {patch_name} (in {patch.path})")
             else:
                 for table_name in sorted(table_names):
-                    msg.append(f" - Source table {patch_name}.{table_name} " f"(in {patch.path})")
-        msg.append("")
-        return "\n".join(msg)
+                    unused_tables_formatted.append(
+                        f" - Source table {patch_name}.{table_name} " f"(in {patch.path})"
+                    )
+        return unused_tables_formatted


 def merge_freshness_time_thresholds(
diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py
index 35c8f3072dd..82d09c12d6b 100644
--- a/core/dbt/parser/sql.py
+++ b/core/dbt/parser/sql.py
@@ -3,7 +3,7 @@
 from typing import Iterable

 from dbt.contracts.graph.manifest import SourceFile
-from dbt.contracts.graph.parsed import ParsedSqlNode, ParsedMacro
+from dbt.contracts.graph.nodes import SqlNode, Macro
 from dbt.contracts.graph.unparsed import UnparsedMacro
 from dbt.exceptions import InternalException
 from dbt.node_types import NodeType
@@ -21,11 +21,11 @@ def name(self):
         return self.block_name


-class SqlBlockParser(SimpleSQLParser[ParsedSqlNode]):
-    def parse_from_dict(self, dct, validate=True) -> ParsedSqlNode:
+class SqlBlockParser(SimpleSQLParser[SqlNode]):
+    def parse_from_dict(self, dct, validate=True) -> SqlNode:
         if validate:
-            ParsedSqlNode.validate(dct)
-        return ParsedSqlNode.from_dict(dct)
+            SqlNode.validate(dct)
+        return SqlNode.from_dict(dct)

     @property
     def resource_type(self) -> NodeType:
@@ -42,21 +42,20 @@ def get_compiled_path(block: FileBlock):

         return os.path.join("sql", block.name)

-    def parse_remote(self, sql: str, name: str) -> ParsedSqlNode:
+    def parse_remote(self, sql: str, name: str) -> SqlNode:
         source_file = SourceFile.remote(sql, self.project.project_name, "sql")
         contents = SqlBlock(block_name=name, file=source_file)
         return self.parse_node(contents)


 class SqlMacroParser(MacroParser):
-    def parse_remote(self, contents) -> Iterable[ParsedMacro]:
+    def parse_remote(self, contents) -> Iterable[Macro]:
         base = UnparsedMacro(
             path="from remote system",
             original_file_path="from remote system",
             package_name=self.project.project_name,
             raw_code=contents,
             language="sql",
-            root_path=self.project.project_root,
             resource_type=NodeType.Macro,
         )
         for node in self.parse_unparsed_macros(base):
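get_unused_msg no longer pre-joins a message string; it returns the raw per-table lines and leaves the preamble and formatting to the UnusedTables event. An illustrative payload (values made up):

    # Illustrative: the list of lines the UnusedTables event receives; the
    # event, not the parser, now owns the surrounding message text.
    unused_tables_formatted = [
        " - Source my_pkg.raw_events (in models/sources.yml)",
        " - Source table my_pkg.raw_users.users_v2 (in models/sources.yml)",
    ]
    warn_or_error(UnusedTables(unused_tables=unused_tables_formatted))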
diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py
index 45f8c0fd0fd..ef78c8d90bf 100644
--- a/core/dbt/task/base.py
+++ b/core/dbt/task/base.py
@@ -22,7 +22,6 @@
     InternalException,
 )
 from dbt.logger import log_manager
-import dbt.events.functions as event_logger
 from dbt.events.functions import fire_event
 from dbt.events.types import (
     DbtProjectError,
@@ -37,12 +36,13 @@
     InternalExceptionOnRun,
     GenericExceptionOnRun,
     NodeConnectionReleaseError,
-    PrintDebugStackTrace,
+    LogDebugStackTrace,
     SkippingDetails,
-    PrintSkipBecauseError,
+    LogSkipBecauseError,
     NodeCompiling,
     NodeExecuting,
 )
+from dbt.events.contextvars import get_node_info
 from .printer import print_run_result_error

 from dbt.adapters.factory import register_adapter
@@ -85,9 +85,6 @@ def pre_init_hook(cls, args):
         """A hook called before the task is initialized."""
         if args.log_format == "json":
             log_manager.format_json()
-            # we're mutating the initialized, but not-yet-configured event logger
-            # because it's being configured too late -- bad! TODO refactor!
-            event_logger.format_json = True
         else:
             log_manager.format_text()

@@ -95,9 +92,6 @@ def set_log_format(cls):
         if flags.LOG_FORMAT == "json":
             log_manager.format_json()
-            # we're mutating the initialized, but not-yet-configured event logger
-            # because it's being configured too late -- bad! TODO refactor!
-            event_logger.format_json = True
         else:
             log_manager.format_text()

@@ -312,11 +306,10 @@ def skip_result(self, node, message):
     def compile_and_execute(self, manifest, ctx):
         result = None
         with self.adapter.connection_for(self.node):
-            ctx.node._event_status["node_status"] = RunningStatus.Compiling
+            ctx.node.update_event_status(node_status=RunningStatus.Compiling)
             fire_event(
                 NodeCompiling(
                     node_info=ctx.node.node_info,
-                    unique_id=ctx.node.unique_id,
                 )
             )
             with collect_timing_info("compile") as timing_info:
@@ -328,11 +321,10 @@ def compile_and_execute(self, manifest, ctx):

             # for ephemeral nodes, we only want to compile, not run
             if not ctx.node.is_ephemeral_model:
-                ctx.node._event_status["node_status"] = RunningStatus.Executing
+                ctx.node.update_event_status(node_status=RunningStatus.Executing)
                 fire_event(
                     NodeExecuting(
                         node_info=ctx.node.node_info,
-                        unique_id=ctx.node.unique_id,
                     )
                 )
                 with collect_timing_info("execute") as timing_info:
@@ -347,7 +339,11 @@ def _handle_catchable_exception(self, e, ctx):
         if e.node is None:
             e.add_node(ctx.node)

-        fire_event(CatchableExceptionOnRun(exc=str(e), exc_info=traceback.format_exc()))
+        fire_event(
+            CatchableExceptionOnRun(
+                exc=str(e), exc_info=traceback.format_exc(), node_info=get_node_info()
+            )
+        )
         return str(e)

     def _handle_internal_exception(self, e, ctx):
@@ -362,7 +358,7 @@ def _handle_generic_exception(self, e, ctx):
                 exc=str(e),
             )
         )
-        fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc()))
+        fire_event(LogDebugStackTrace(exc_info=traceback.format_exc()))

         return str(e)

@@ -451,7 +447,7 @@ def on_skip(self):
             # failure, print a special 'error skip' message.
             if self._skip_caused_by_ephemeral_failure():
                 fire_event(
-                    PrintSkipBecauseError(
+                    LogSkipBecauseError(
                         schema=schema_name,
                         relation=node_name,
                         index=self.node_index,
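Direct writes to the private _event_status dict are replaced with update_event_status (and, later in this patch, clear_event_status). A minimal sketch of what such helpers look like; the real versions live on the node classes in dbt.contracts.graph.nodes and may differ:

    # Hedged sketch of the helpers this diff switches to; not copied from
    # dbt's node classes.
    class NodeEventStatusMixin:
        def __init__(self):
            self._event_status = {}

        def update_event_status(self, **kwargs):
            # e.g. node.update_event_status(node_status=RunningStatus.Compiling)
            for key, value in kwargs.items():
                self._event_status[key] = value

        def clear_event_status(self):
            self._event_status = {}

Callers stop reaching into a private attribute, and the node type keeps a single place to hook validation or logging of status transitions.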
diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py
index 10e3a1ad2a6..853a01ebd1e 100644
--- a/core/dbt/task/debug.py
+++ b/core/dbt/task/debug.py
@@ -256,9 +256,9 @@ def _load_profile(self):
             profile: Profile = Profile.render(
                 renderer,
                 profile_name,
-                self.args.threads,
-                self.args.target,
                 self.args.profile,
+                self.args.target,
+                self.args.threads,
             )
         except dbt.exceptions.DbtConfigError as exc:
             profile_errors.append(str(exc))
diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py
index 6b3bc5fb7c4..d03ec3748dc 100644
--- a/core/dbt/task/deps.py
+++ b/core/dbt/task/deps.py
@@ -1,4 +1,4 @@
-from typing import Dict, Any
+from typing import Dict, Any, Optional

 from dbt import flags

@@ -12,7 +12,9 @@
 from dbt.config.utils import parse_cli_vars
 from dbt.deps.base import downloads_directory
 from dbt.deps.resolver import resolve_packages
+from dbt.deps.registry import RegistryPinnedPackage

+from dbt.events.proto_types import ListOfStrings
 from dbt.events.functions import fire_event
 from dbt.events.types import (
     DepsNoPackagesFound,
@@ -45,22 +47,27 @@ def __init__(
         super().__init__(args=args, config=None, project=project)
         self.cli_vars = cli_vars

-    def track_package_install(self, package_name: str, source_type: str, version: str) -> None:
+    def track_package_install(
+        self, package_name: str, source_type: str, version: Optional[str]
+    ) -> None:
         # Hub packages do not need to be hashed, as they are public
-        # Use the string 'local' for local package versions
         if source_type == "local":
             package_name = dbt.utils.md5(package_name)
             version = "local"
+        elif source_type == "tarball":
+            package_name = dbt.utils.md5(package_name)
+            version = "tarball"
         elif source_type != "hub":
             package_name = dbt.utils.md5(package_name)
             version = dbt.utils.md5(version)
+
         dbt.tracking.track_package_install(
             "deps",
             self.project.hashed_name(),
             {"name": package_name, "source": source_type, "version": version},
         )

-    def run(self):
+    def run(self) -> None:
         system.make_directory(self.project.packages_install_path)
         packages = self.project.packages.packages
         if not packages:
@@ -81,7 +88,7 @@ def run(self):
             fire_event(DepsStartPackageInstall(package_name=package_name))
             package.install(self.project, renderer)
             fire_event(DepsInstallInfo(version_name=package.nice_version_name()))
-            if source_type == "hub":
+            if isinstance(package, RegistryPinnedPackage):
                 version_latest = package.get_version_latest()
                 if version_latest != version:
                     packages_to_upgrade.append(package_name)
@@ -96,7 +103,7 @@ def run(self):
             )
         if packages_to_upgrade:
             fire_event(EmptyLine())
-            fire_event(DepsNotifyUpdatesAvailable(packages=packages_to_upgrade))
+            fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))

     @classmethod
     def _get_unset_profile(cls) -> UnsetProfile:
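track_package_install now anonymizes tarball packages the same way as local ones: anything that is not a public hub package is md5-hashed before being sent. The rule, condensed into a runnable sketch (md5 here stands in for dbt.utils.md5):

    # Condensed restatement of the branch above; md5 stands in for
    # dbt.utils.md5 and returns a hex digest string.
    import hashlib

    def md5(value: str) -> str:
        return hashlib.md5(value.encode("utf-8")).hexdigest()

    def anonymize(package_name: str, source_type: str, version: str):
        if source_type == "local":
            return md5(package_name), "local"
        if source_type == "tarball":
            return md5(package_name), "tarball"
        if source_type != "hub":  # e.g. git packages
            return md5(package_name), md5(version)
        return package_name, version  # public hub package: nothing to hash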
diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py
index ab256334271..704368cf24f 100644
--- a/core/dbt/task/freshness.py
+++ b/core/dbt/task/freshness.py
@@ -16,19 +16,16 @@
     FreshnessStatus,
 )
 from dbt.exceptions import RuntimeException, InternalException
-from dbt.events.functions import fire_event
+from dbt.events.functions import fire_event, info
 from dbt.events.types import (
     FreshnessCheckComplete,
-    PrintStartLine,
-    PrintFreshnessErrorLine,
-    PrintFreshnessErrorStaleLine,
-    PrintFreshnessWarnLine,
-    PrintFreshnessPassLine,
+    LogStartLine,
+    LogFreshnessResult,
 )
 from dbt.node_types import NodeType

 from dbt.graph import ResourceTypeSelector
-from dbt.contracts.graph.parsed import ParsedSourceDefinition
+from dbt.contracts.graph.nodes import SourceDefinition


 RESULT_FILE_NAME = "sources.json"
@@ -41,7 +38,7 @@ def on_skip(self):
     def before_execute(self):
         description = "freshness of {0.source_name}.{0.name}".format(self.node)
         fire_event(
-            PrintStartLine(
+            LogStartLine(
                 description=description,
                 index=self.node_index,
                 total=self.num_nodes,
@@ -56,50 +53,19 @@ def after_execute(self, result):
         else:
             source_name = result.source_name
             table_name = result.table_name
-        if result.status == FreshnessStatus.RuntimeErr:
-            fire_event(
-                PrintFreshnessErrorLine(
-                    source_name=source_name,
-                    table_name=table_name,
-                    index=self.node_index,
-                    total=self.num_nodes,
-                    execution_time=result.execution_time,
-                    node_info=self.node.node_info,
-                )
-            )
-        elif result.status == FreshnessStatus.Error:
-            fire_event(
-                PrintFreshnessErrorStaleLine(
-                    source_name=source_name,
-                    table_name=table_name,
-                    index=self.node_index,
-                    total=self.num_nodes,
-                    execution_time=result.execution_time,
-                    node_info=self.node.node_info,
-                )
-            )
-        elif result.status == FreshnessStatus.Warn:
-            fire_event(
-                PrintFreshnessWarnLine(
-                    source_name=source_name,
-                    table_name=table_name,
-                    index=self.node_index,
-                    total=self.num_nodes,
-                    execution_time=result.execution_time,
-                    node_info=self.node.node_info,
-                )
-            )
-        else:
-            fire_event(
-                PrintFreshnessPassLine(
-                    source_name=source_name,
-                    table_name=table_name,
-                    index=self.node_index,
-                    total=self.num_nodes,
-                    execution_time=result.execution_time,
-                    node_info=self.node.node_info,
-                )
+        level = LogFreshnessResult.status_to_level(str(result.status))
+        fire_event(
+            LogFreshnessResult(
+                info=info(level=level),
+                status=result.status,
+                source_name=source_name,
+                table_name=table_name,
+                index=self.node_index,
+                total=self.num_nodes,
+                execution_time=result.execution_time,
+                node_info=self.node.node_info,
             )
+        )

     def error_result(self, node, message, start_time, timing_info):
         return self._build_run_result(
@@ -175,7 +141,7 @@ class FreshnessSelector(ResourceTypeSelector):
     def node_is_match(self, node):
         if not super().node_is_match(node):
             return False
-        if not isinstance(node, ParsedSourceDefinition):
+        if not isinstance(node, SourceDefinition):
             return False
         return node.has_freshness
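The four Print* freshness events collapse into a single LogFreshnessResult whose log level is derived from the result status. A sketch of the mapping implied by the events it replaces; the real classmethod lives on LogFreshnessResult and may differ in detail:

    # Hedged sketch of the implied status -> level mapping.
    def status_to_level(status: str) -> str:
        if status in ("error", "runtime error"):
            return "error"  # stale source or runtime failure
        if status == "warn":
            return "warn"
        return "info"       # pass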
diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py
index 0bc6f3f9527..87723a530a1 100644
--- a/core/dbt/task/generate.py
+++ b/core/dbt/task/generate.py
@@ -8,7 +8,7 @@

 from .compile import CompileTask
 from dbt.adapters.factory import get_adapter
-from dbt.contracts.graph.compiled import CompileResultNode
+from dbt.contracts.graph.nodes import ResultNode
 from dbt.contracts.graph.manifest import Manifest
 from dbt.contracts.results import (
     NodeStatus,
@@ -22,7 +22,7 @@
     ColumnMetadata,
     CatalogArtifact,
 )
-from dbt.exceptions import InternalException
+from dbt.exceptions import InternalException, AmbiguousCatalogMatch
 from dbt.include.global_project import DOCS_INDEX_FILE_PATH
 from dbt.events.functions import fire_event
 from dbt.events.types import (
@@ -119,7 +119,7 @@ def make_unique_id_map(
             unique_ids = source_map.get(table.key(), set())
             for unique_id in unique_ids:
                 if unique_id in sources:
-                    dbt.exceptions.raise_ambiguous_catalog_match(
+                    raise AmbiguousCatalogMatch(
                         unique_id,
                         sources[unique_id].to_dict(omit_none=True),
                         table.to_dict(omit_none=True),
@@ -174,7 +174,7 @@ def format_stats(stats: PrimitiveDict) -> StatsDict:
     return stats_collector


-def mapping_key(node: CompileResultNode) -> CatalogKey:
+def mapping_key(node: ResultNode) -> CatalogKey:
     dkey = dbt.utils.lowercase(node.database)
     return CatalogKey(dkey, node.schema.lower(), node.identifier.lower())

diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py
index df0a181ba5c..e1be8f214d3 100644
--- a/core/dbt/task/list.py
+++ b/core/dbt/task/list.py
@@ -1,14 +1,15 @@
 import json

-from dbt.contracts.graph.parsed import ParsedExposure, ParsedSourceDefinition, ParsedMetric
+from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric
 from dbt.graph import ResourceTypeSelector
 from dbt.task.runnable import GraphRunnableTask, ManifestTask
 from dbt.task.test import TestSelector
 from dbt.node_types import NodeType
-from dbt.exceptions import RuntimeException, InternalException, warn_or_error
+from dbt.events.functions import warn_or_error
+from dbt.events.types import NoNodesSelected
+from dbt.exceptions import RuntimeException, InternalException
 from dbt.logger import log_manager
-import logging
-import dbt.events.functions as event_logger
+from dbt.events.eventmgr import EventLevel


 class ListTask(GraphRunnableTask):
@@ -60,16 +61,15 @@ def pre_init_hook(cls, args):
         # - mutating the initialized, not-yet-configured STDOUT event logger
         #   because it's being configured too late -- bad! TODO refactor!
         log_manager.stderr_console()
-        event_logger.STDOUT_LOG.level = logging.WARN
         super().pre_init_hook(args)
-        return logging.WARN
+        return EventLevel.WARN

     def _iterate_selected_nodes(self):
         selector = self.get_node_selector()
         spec = self.get_selection_spec()
         nodes = sorted(selector.get_selected(spec))
         if not nodes:
-            warn_or_error("No nodes selected!")
+            warn_or_error(NoNodesSelected())
             return
         if self.manifest is None:
             raise InternalException("manifest is None in _iterate_selected_nodes")
@@ -91,17 +91,17 @@ def _iterate_selected_nodes(self):
     def generate_selectors(self):
         for node in self._iterate_selected_nodes():
             if node.resource_type == NodeType.Source:
-                assert isinstance(node, ParsedSourceDefinition)
+                assert isinstance(node, SourceDefinition)
                 # sources are searched for by pkg.source_name.table_name
                 source_selector = ".".join([node.package_name, node.source_name, node.name])
                 yield f"source:{source_selector}"
             elif node.resource_type == NodeType.Exposure:
-                assert isinstance(node, ParsedExposure)
+                assert isinstance(node, Exposure)
                 # exposures are searched for by pkg.exposure_name
                 exposure_selector = ".".join([node.package_name, node.name])
                 yield f"exposure:{exposure_selector}"
             elif node.resource_type == NodeType.Metric:
-                assert isinstance(node, ParsedMetric)
+                assert isinstance(node, Metric)
                 # metrics are searched for by pkg.metric_name
                 metric_selector = ".".join([node.package_name, node.name])
                 yield f"metric:{metric_selector}"
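mapping_key builds the catalog lookup key None-safely: node.database can be None on adapters without a database concept, so it goes through dbt.utils.lowercase while schema and identifier are required and use plain str.lower(). A self-contained sketch of that shape; CatalogKey here only illustrates the (database, schema, name) triple:

    # Hedged sketch; the real CatalogKey lives in dbt's catalog contracts.
    from typing import NamedTuple, Optional

    class CatalogKey(NamedTuple):
        database: Optional[str]
        schema: str
        name: str

    def lowercase(value: Optional[str]) -> Optional[str]:
        return None if value is None else value.lower()

    key = CatalogKey(lowercase(None), "ANALYTICS".lower(), "ORDERS".lower())
    # -> CatalogKey(database=None, schema='analytics', name='orders')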
diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py
index 3861b41bef2..edb2592d194 100644
--- a/core/dbt/task/printer.py
+++ b/core/dbt/task/printer.py
@@ -120,6 +120,8 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals
     elif result.message is not None:
         first = True
         for line in result.message.split("\n"):
+            # TODO: why do we format like this? Is there a reason this needs to
+            # be split instead of sending it as a single log line?
             if first:
                 fire_event(FirstRunResultError(msg=line))
                 first = False
diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py
index 21550017202..bc8f9a2de75 100644
--- a/core/dbt/task/run.py
+++ b/core/dbt/task/run.py
@@ -17,28 +17,26 @@
 from dbt.adapters.base import BaseRelation
 from dbt.clients.jinja import MacroGenerator
 from dbt.context.providers import generate_runtime_model_context
-from dbt.contracts.graph.compiled import CompileResultNode
 from dbt.contracts.graph.model_config import Hook
-from dbt.contracts.graph.parsed import ParsedHookNode
-from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus
+from dbt.contracts.graph.nodes import HookNode, ResultNode
+from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult
 from dbt.exceptions import (
     CompilationException,
     InternalException,
+    MissingMaterialization,
     RuntimeException,
     ValidationException,
-    missing_materialization,
 )
-from dbt.events.functions import fire_event, get_invocation_id
+from dbt.events.functions import fire_event, get_invocation_id, info
 from dbt.events.types import (
     DatabaseErrorRunningHook,
     EmptyLine,
     HooksRunning,
     HookFinished,
-    PrintModelErrorResultLine,
-    PrintModelResultLine,
-    PrintStartLine,
-    PrintHookEndLine,
-    PrintHookStartLine,
+    LogModelResult,
+    LogStartLine,
+    LogHookEndLine,
+    LogHookStartLine,
 )
 from dbt.logger import (
     TextOnly,
@@ -80,17 +78,17 @@ def __eq__(self, other):
         return isinstance(other, self.__class__)


-def _hook_list() -> List[ParsedHookNode]:
+def _hook_list() -> List[HookNode]:
     return []


 def get_hooks_by_tags(
-    nodes: Iterable[CompileResultNode],
+    nodes: Iterable[ResultNode],
     match_tags: Set[str],
-) -> List[ParsedHookNode]:
+) -> List[HookNode]:
     matched_nodes = []
     for node in nodes:
-        if not isinstance(node, ParsedHookNode):
+        if not isinstance(node, HookNode):
             continue
         node_tags = node.tags
         if len(set(node_tags) & match_tags):
@@ -176,7 +174,7 @@ def describe_node(self):

     def print_start_line(self):
         fire_event(
-            PrintStartLine(
+            LogStartLine(
                 description=self.describe_node(),
                 index=self.node_index,
                 total=self.num_nodes,
@@ -187,27 +185,22 @@ def print_start_line(self):
     def print_result_line(self, result):
         description = self.describe_node()
         if result.status == NodeStatus.Error:
-            fire_event(
-                PrintModelErrorResultLine(
-                    description=description,
-                    status=result.status,
-                    index=self.node_index,
-                    total=self.num_nodes,
-                    execution_time=result.execution_time,
-                    node_info=self.node.node_info,
-                )
-            )
+            status = result.status
+            level = "error"
         else:
-            fire_event(
-                PrintModelResultLine(
-                    description=description,
-                    status=result.message,
-                    index=self.node_index,
-                    total=self.num_nodes,
-                    execution_time=result.execution_time,
-                    node_info=self.node.node_info,
-                )
+            status = result.message
+            level = "info"
+        fire_event(
+            LogModelResult(
+                description=description,
+                status=status,
+                index=self.node_index,
+                total=self.num_nodes,
+                execution_time=result.execution_time,
+                node_info=self.node.node_info,
+                info=info(level=level),
             )
+        )

     def before_execute(self):
         self.print_start_line()
@@ -259,7 +252,7 @@ def execute(self, model, manifest):
         )

         if materialization_macro is None:
-            missing_materialization(model, self.adapter.type())
+            raise MissingMaterialization(model=model, adapter_type=self.adapter.type())

         if "config" not in context:
             raise InternalException(
@@ -310,20 +303,20 @@ def get_hook_sql(self, adapter, hook, idx, num_hooks, extra_context):
             hook_obj = get_hook(statement, index=hook_index)
         return hook_obj.sql or ""

-    def _hook_keyfunc(self, hook: ParsedHookNode) -> Tuple[str, Optional[int]]:
+    def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]:
         package_name = hook.package_name
         if package_name == self.config.project_name:
             package_name = BiggestName("")
         return package_name, hook.index

-    def get_hooks_by_type(self, hook_type: RunHookType) -> List[ParsedHookNode]:
+    def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]:

         if self.manifest is None:
             raise InternalException("self.manifest was None in get_hooks_by_type")

         nodes = self.manifest.nodes.values()
         # find all hooks defined in the manifest (could be multiple projects)
-        hooks: List[ParsedHookNode] = get_hooks_by_tags(nodes, {hook_type})
+        hooks: List[HookNode] = get_hooks_by_tags(nodes, {hook_type})
         hooks.sort(key=self._hook_keyfunc)
         return hooks

@@ -346,8 +339,9 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context):

         finishctx = TimestampNamed("node_finished_at")
         for idx, hook in enumerate(ordered_hooks, start=1):
-            hook._event_status["started_at"] = datetime.utcnow().isoformat()
-            hook._event_status["node_status"] = RunningStatus.Started
+            hook.update_event_status(
+                started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started
+            )
             sql = self.get_hook_sql(adapter, hook, idx, num_hooks, extra_context)

             hook_text = "{}.{}.{}".format(hook.package_name, hook_type, hook.index)
@@ -355,7 +349,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context):
             with UniqueID(hook.unique_id):
                 with hook_meta_ctx, startctx:
                     fire_event(
-                        PrintHookStartLine(
+                        LogHookStartLine(
                             statement=hook_text,
                             index=idx,
                             total=num_hooks,
@@ -371,11 +365,11 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context):
                 status = "OK"

             self.ran_hooks.append(hook)
-            hook._event_status["finished_at"] = datetime.utcnow().isoformat()
+            hook.update_event_status(finished_at=datetime.utcnow().isoformat())
             with finishctx, DbtModelState({"node_status": "passed"}):
-                hook._event_status["node_status"] = RunStatus.Success
+                hook.update_event_status(node_status=RunStatus.Success)
                 fire_event(
-                    PrintHookEndLine(
+                    LogHookEndLine(
                         statement=hook_text,
                         status=status,
                         index=idx,
@@ -386,9 +380,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context):
             )
             # `_event_status` dict is only used for logging. Make sure
             # it gets deleted when we're done with it
-            del hook._event_status["started_at"]
-            del hook._event_status["finished_at"]
-            del hook._event_status["node_status"]
+            hook.clear_event_status()

         self._total_executed += len(ordered_hooks)

@@ -400,12 +392,22 @@ def safe_run_hooks(
     ) -> None:
         try:
             self.run_hooks(adapter, hook_type, extra_context)
-        except RuntimeException:
+        except RuntimeException as exc:
             fire_event(DatabaseErrorRunningHook(hook_type=hook_type.value))
-            raise
+            self.node_results.append(
+                BaseResult(
+                    status=RunStatus.Error,
+                    thread_id="main",
+                    timing=[],
+                    message=f"{hook_type.value} failed, error:\n {exc.msg}",
+                    adapter_response={},
+                    execution_time=0,
+                    failures=1,
+                )
+            )

     def print_results_line(self, results, execution_time):
-        nodes = [r.node for r in results] + self.ran_hooks
+        nodes = [r.node for r in results if hasattr(r, "node")] + self.ran_hooks
         stat_line = get_counts(nodes)

         execution = ""
@@ -450,9 +452,6 @@ def after_run(self, adapter, results):
             with adapter.connection_named("master"):
                 self.safe_run_hooks(adapter, RunHookType.End, extras)

-    def after_hooks(self, adapter, results, elapsed):
-        self.print_results_line(results, elapsed)
-
     def get_node_selector(self) -> ResourceTypeSelector:
         if self.manifest is None or self.graph is None:
             raise InternalException("manifest and graph must be set to get perform node selection")
diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py
index e7b43a837b0..b9d3115482e 100644
--- a/core/dbt/task/run_operation.py
+++ b/core/dbt/task/run_operation.py
@@ -15,7 +15,7 @@
 from dbt.events.types import (
     RunningOperationCaughtError,
     RunningOperationUncaughtError,
-    PrintDebugStackTrace,
+    LogDebugStackTrace,
 )


@@ -62,11 +62,11 @@ def run(self) -> RunOperationResultsArtifact:
             self._run_unsafe()
         except dbt.exceptions.Exception as exc:
             fire_event(RunningOperationCaughtError(exc=str(exc)))
-            fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc()))
+            fire_event(LogDebugStackTrace(exc_info=traceback.format_exc()))
             success = False
         except Exception as exc:
             fire_event(RunningOperationUncaughtError(exc=str(exc)))
-            fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc()))
+            fire_event(LogDebugStackTrace(exc_info=traceback.format_exc()))
             success = False
         else:
             success = True
diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py
index c6866cde2e1..fa8fdb724a8 100644
--- a/core/dbt/task/runnable.py
+++ b/core/dbt/task/runnable.py
@@ -26,20 +26,21 @@
     ModelMetadata,
     NodeCount,
 )
-from dbt.events.functions import fire_event
+from dbt.events.functions import fire_event, warn_or_error
 from dbt.events.types import (
     EmptyLine,
-    PrintCancelLine,
+    LogCancelLine,
     DefaultSelector,
     NodeStart,
     NodeFinished,
     QueryCancelationUnsupported,
     ConcurrencyLine,
     EndRunResult,
+    NothingToDo,
 )
-from dbt.contracts.graph.compiled import CompileResultNode
+from dbt.events.contextvars import log_contextvars
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import ParsedSourceDefinition
+from dbt.contracts.graph.nodes import SourceDefinition, ResultNode
 from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus
 from dbt.contracts.state import PreviousState
 from dbt.exceptions import (
@@ -47,7 +48,6 @@
     NotImplementedException,
     RuntimeException,
     FailFastException,
-    warn_or_error,
 )
 from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference, Graph

@@ -57,7 +57,6 @@
 import dbt.exceptions
 from dbt import flags
 import dbt.utils
-from dbt.ui
import warning_tag RESULT_FILE_NAME = "run_results.json" MANIFEST_FILE_NAME = "manifest.json" @@ -108,7 +107,7 @@ class GraphRunnableTask(ManifestTask): def __init__(self, args, config): super().__init__(args, config) self.job_queue: Optional[GraphQueue] = None - self._flattened_nodes: Optional[List[CompileResultNode]] = None + self._flattened_nodes: Optional[List[ResultNode]] = None self.run_count: int = 0 self.num_nodes: int = 0 @@ -213,47 +212,45 @@ def get_runner(self, node): def call_runner(self, runner): uid_context = UniqueID(runner.node.unique_id) - with RUNNING_STATE, uid_context: + with RUNNING_STATE, uid_context, log_contextvars(node_info=runner.node.node_info): startctx = TimestampNamed("node_started_at") index = self.index_offset(runner.node_index) - runner.node._event_status["started_at"] = datetime.utcnow().isoformat() - runner.node._event_status["node_status"] = RunningStatus.Started + runner.node.update_event_status( + started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started + ) extended_metadata = ModelMetadata(runner.node, index) with startctx, extended_metadata: fire_event( NodeStart( node_info=runner.node.node_info, - unique_id=runner.node.unique_id, ) ) status: Dict[str, str] = {} try: result = runner.run_with_hooks(self.manifest) status = runner.get_result_status(result) - runner.node._event_status["node_status"] = result.status - runner.node._event_status["finished_at"] = datetime.utcnow().isoformat() + runner.node.update_event_status( + node_status=result.status, finished_at=datetime.utcnow().isoformat() + ) finally: finishctx = TimestampNamed("finished_at") with finishctx, DbtModelState(status): fire_event( NodeFinished( node_info=runner.node.node_info, - unique_id=runner.node.unique_id, run_result=result.to_msg(), ) ) # `_event_status` dict is only used for logging. Make sure # it gets deleted when we're done with it - del runner.node._event_status["started_at"] - del runner.node._event_status["finished_at"] - del runner.node._event_status["node_status"] + runner.node.clear_event_status() fail_fast = flags.FAIL_FAST if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast: self._raise_next_tick = FailFastException( - message="Failing early due to test failure or runtime error", + msg="Failing early due to test failure or runtime error", result=result, node=getattr(result, "node", None), ) @@ -339,7 +336,7 @@ def _handle_result(self, result): if self.manifest is None: raise InternalException("manifest was None in _handle_result") - if isinstance(node, ParsedSourceDefinition): + if isinstance(node, SourceDefinition): self.manifest.update_source(node) else: self.manifest.update_node(node) @@ -371,7 +368,7 @@ def _cancel_connections(self, pool): continue # if we don't have a manifest/don't have a node, print # anyway. 
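The run.py and runnable.py hunks above replace direct writes to the private _event_status dict (and the matching del statements) with update_event_status(...) and clear_event_status(). A minimal sketch of that lifecycle, mirroring the shape of call_runner; do_run stands in for the actual runner invocation and is not a dbt API:

    from datetime import datetime
    from dbt.contracts.results import RunningStatus

    def run_node_with_status(node, do_run):
        # replaces: node._event_status["started_at"] = ...
        node.update_event_status(
            started_at=datetime.utcnow().isoformat(),
            node_status=RunningStatus.Started,
        )
        try:
            result = do_run(node)
            node.update_event_status(
                node_status=result.status,
                finished_at=datetime.utcnow().isoformat(),
            )
            return result
        finally:
            # `_event_status` is only used for logging; always clear it
            node.clear_event_status()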
- fire_event(PrintCancelLine(conn_name=conn_name)) + fire_event(LogCancelLine(conn_name=conn_name)) pool.join() @@ -379,8 +376,13 @@ def execute_nodes(self): num_threads = self.config.threads target_name = self.config.target_name + # following line can be removed when legacy logger is removed with NodeCount(self.num_nodes): - fire_event(ConcurrencyLine(num_threads=num_threads, target_name=target_name)) + fire_event( + ConcurrencyLine( + num_threads=num_threads, target_name=target_name, node_count=self.num_nodes + ) + ) with TextOnly(): fire_event(EmptyLine()) @@ -421,9 +423,6 @@ def populate_adapter_cache(self, adapter, required_schemas: Set[BaseRelation] = {"adapter_cache_construction_elapsed": cache_populate_time} ) - def before_hooks(self, adapter): - pass - def before_run(self, adapter, selected_uids: AbstractSet[str]): with adapter.connection_named("master"): self.populate_adapter_cache(adapter) @@ -431,24 +430,24 @@ def before_run(self, adapter, selected_uids: AbstractSet[str]): def after_run(self, adapter, results): pass - def after_hooks(self, adapter, results, elapsed): + def print_results_line(self, node_results, elapsed): pass def execute_with_hooks(self, selected_uids: AbstractSet[str]): adapter = get_adapter(self.config) + started = time.time() try: - self.before_hooks(adapter) - started = time.time() self.before_run(adapter, selected_uids) res = self.execute_nodes() self.after_run(adapter, res) - elapsed = time.time() - started - self.after_hooks(adapter, res, elapsed) - finally: adapter.cleanup_connections() + elapsed = time.time() - started + self.print_results_line(self.node_results, elapsed) + result = self.get_result( + results=self.node_results, elapsed_time=elapsed, generated_at=datetime.utcnow() + ) - result = self.get_result(results=res, elapsed_time=elapsed, generated_at=datetime.utcnow()) return result def write_result(self, result): @@ -466,8 +465,7 @@ def run(self): if len(self._flattened_nodes) == 0: with TextOnly(): fire_event(EmptyLine()) - msg = "Nothing to do. 
Try checking your model configs and model specification args" - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(NothingToDo()) result = self.get_result( results=[], generated_at=datetime.utcnow(), diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py index 01535916ad8..5c922a5ba90 100644 --- a/core/dbt/task/seed.py +++ b/core/dbt/task/seed.py @@ -9,14 +9,13 @@ from dbt.exceptions import InternalException from dbt.graph import ResourceTypeSelector from dbt.logger import TextOnly -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( SeedHeader, SeedHeaderSeparator, EmptyLine, - PrintSeedErrorResultLine, - PrintSeedResultLine, - PrintStartLine, + LogSeedResult, + LogStartLine, ) from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus @@ -28,7 +27,7 @@ def describe_node(self): def before_execute(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -47,30 +46,20 @@ def compile(self, manifest): def print_result_line(self, result): model = result.node - if result.status == NodeStatus.Error: - fire_event( - PrintSeedErrorResultLine( - status=result.status, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - schema=self.node.schema, - relation=model.alias, - node_info=model.node_info, - ) - ) - else: - fire_event( - PrintSeedResultLine( - status=result.message, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - schema=self.node.schema, - relation=model.alias, - node_info=model.node_info, - ) + level = "error" if result.status == NodeStatus.Error else "info" + fire_event( + LogSeedResult( + info=info(level=level), + status=result.status, + result_message=result.message, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + schema=self.node.schema, + relation=model.alias, + node_info=model.node_info, ) + ) class SeedTask(RunTask): diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py index 7bd62ffb55b..44ccbd88361 100644 --- a/core/dbt/task/snapshot.py +++ b/core/dbt/task/snapshot.py @@ -1,8 +1,8 @@ from .run import ModelRunner, RunTask from dbt.exceptions import InternalException -from dbt.events.functions import fire_event -from dbt.events.types import PrintSnapshotErrorResultLine, PrintSnapshotResultLine +from dbt.events.functions import fire_event, info +from dbt.events.types import LogSnapshotResult from dbt.graph import ResourceTypeSelector from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus @@ -15,30 +15,19 @@ def describe_node(self): def print_result_line(self, result): model = result.node cfg = model.config.to_dict(omit_none=True) - if result.status == NodeStatus.Error: - fire_event( - PrintSnapshotErrorResultLine( - status=result.status, - description=self.get_node_representation(), - cfg=cfg, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - else: - fire_event( - PrintSnapshotResultLine( - status=result.message, - description=self.get_node_representation(), - cfg=cfg, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) + level = "error" if result.status == NodeStatus.Error else "info" + fire_event( + LogSnapshotResult( + info=info(level=level), + status=result.status, + 
description=self.get_node_representation(), + cfg=cfg, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + node_info=model.node_info, ) + ) class SnapshotTask(RunTask): diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index ee871b6179d..26d6d46f028 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -5,29 +5,27 @@ from dbt.events.format import pluralize from dbt.dataclass_schema import dbtClassMixin import threading -from typing import Union from .compile import CompileRunner from .run import RunTask -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompiledTestNode, +from dbt.contracts.graph.nodes import ( + TestNode, ) from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult from dbt.context.providers import generate_runtime_model_context from dbt.clients.jinja import MacroGenerator -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( - PrintErrorTestResult, - PrintPassTestResult, - PrintWarnTestResult, - PrintFailureTestResult, - PrintStartLine, + LogTestResult, + LogStartLine, +) +from dbt.exceptions import ( + InternalException, + InvalidBoolean, + MissingMaterialization, ) -from dbt.exceptions import InternalException, invalid_bool_error, missing_materialization from dbt.graph import ( ResourceTypeSelector, ) @@ -53,7 +51,7 @@ def convert_bool_type(field) -> bool: try: return bool(strtobool(field)) # type: ignore except ValueError: - raise invalid_bool_error(field, "get_test_sql") + raise InvalidBoolean(field, "get_test_sql") # need this so we catch both true bools and 0/1 return bool(field) @@ -67,54 +65,22 @@ def describe_node(self): def print_result_line(self, result): model = result.node - if result.status == TestStatus.Error: - fire_event( - PrintErrorTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Pass: - fire_event( - PrintPassTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Warn: - fire_event( - PrintWarnTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - num_failures=result.failures, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Fail: - fire_event( - PrintFailureTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - num_failures=result.failures, - node_info=model.node_info, - ) + fire_event( + LogTestResult( + name=model.name, + info=info(level=LogTestResult.status_to_level(str(result.status))), + status=str(result.status), + index=self.node_index, + num_models=self.num_nodes, + execution_time=result.execution_time, + node_info=model.node_info, + num_failures=result.failures, ) - else: - raise RuntimeError("unexpected status: {}".format(result.status)) + ) def print_start_line(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -126,7 +92,7 @@ def before_execute(self): self.print_start_line() def execute_test( - self, test: Union[CompiledSingularTestNode, CompiledGenericTestNode], 
manifest: Manifest + self, test: TestNode, manifest: Manifest ) -> TestResultData: context = generate_runtime_model_context(test, self.config, manifest) @@ -135,7 +101,7 @@ def execute_test( ) if materialization_macro is None: - missing_materialization(test, self.adapter.type()) + raise MissingMaterialization(model=test, adapter_type=self.adapter.type()) if "config" not in context: raise InternalException( @@ -174,7 +140,7 @@ def execute_test( TestResultData.validate(test_result_dct) return TestResultData.from_dict(test_result_dct) - def execute(self, test: CompiledTestNode, manifest: Manifest): + def execute(self, test: TestNode, manifest: Manifest): result = self.execute_test(test, manifest) severity = test.config.severity.upper() diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index ffea566f4db..a8c640ef116 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -6,11 +6,11 @@ import warnings import yaml -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DatabaseException import dbt.flags as flags from dbt.config.runtime import RuntimeConfig from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type -from dbt.events.functions import setup_event_logger +from dbt.events.functions import setup_event_logger, cleanup_event_logger from dbt.tests.util import ( write_file, run_sql_with_adapter, @@ -229,6 +229,15 @@ def selectors_yml(project_root, selectors): write_file(data, project_root, "selectors.yml") +# This fixture ensures that the logging infrastructure does not accidentally +# reuse streams configured on previous test runs, which might now be closed. +# It should be run before (and so included as a parameter by) any other fixture +# which runs dbt-core functions that might fire events. +@pytest.fixture(scope="class") +def clean_up_logging(): + cleanup_event_logger() + + # This creates an adapter that is used for running test setup, such as creating # the test schema, and sql commands that are run in tests prior to the first # dbt command. After a dbt command is run, the project.adapter property will @@ -240,7 +249,7 @@ def selectors_yml(project_root, selectors): # otherwise this will fail. So to test errors in those areas, you need to copy the files # into the project in the tests instead of putting them in the fixtures. @pytest.fixture(scope="class") -def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml): +def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging): # The profiles.yml and dbt_project.yml should already be written out args = Namespace( profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None, threads=None @@ -438,6 +447,7 @@ def get_tables_in_schema(self): # to pull in the other fixtures individually to access their information. @pytest.fixture(scope="class") def project( + clean_up_logging, project_root, profiles_root, request, @@ -484,9 +494,10 @@ def project( # a `load_dependencies` method. # Macros gets executed as part of drop_scheme in core/dbt/adapters/sql/impl.py. When # the macros have errors (which is what we're actually testing for...) 
they end up - # throwing CompilationExceptions + # throwing CompilationExceptions or DatabaseExceptions try: project.drop_test_schema() - except (KeyError, AttributeError, CompilationException): + except (KeyError, AttributeError, CompilationException, DatabaseException): pass os.chdir(orig_cwd) + cleanup_event_logger() diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py index 6cdc4ee5b77..824e6f88630 100644 --- a/core/dbt/tests/util.py +++ b/core/dbt/tests/util.py @@ -1,3 +1,4 @@ +from io import StringIO import os import shutil import yaml @@ -88,7 +89,8 @@ def run_dbt(args: List[str] = None, expect_pass=True): # will turn the logs into json, so you have to be prepared for that. def run_dbt_and_capture(args: List[str] = None, expect_pass=True): try: - stringbuf = capture_stdout_logs() + stringbuf = StringIO() + capture_stdout_logs(stringbuf) res = run_dbt(args, expect_pass=expect_pass) stdout = stringbuf.getvalue() @@ -235,7 +237,7 @@ def run_sql_with_adapter(adapter, sql, fetch=None): return adapter.run_sql_for_tests(sql, fetch, conn) -# Get a Relation object from the identifer (name of table/view). +# Get a Relation object from the identifier (name of table/view). # Uses the default database and schema. If you need a relation # with a different schema, it should be constructed in the test. # Uses: diff --git a/core/dbt/utils.py b/core/dbt/utils.py index b7cc6475319..987371b6b02 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -15,7 +15,7 @@ from pathlib import PosixPath, WindowsPath from contextlib import contextmanager -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionException, DuplicateAlias from dbt.events.functions import fire_event from dbt.events.types import RetryExternalCall, RecordRetryException from dbt import flags @@ -365,7 +365,7 @@ def translate_mapping(self, kwargs: Mapping[str, Any]) -> Dict[str, Any]: for key, value in kwargs.items(): canonical_key = self.aliases.get(key, key) if canonical_key in result: - dbt.exceptions.raise_duplicate_alias(kwargs, self.aliases, canonical_key) + raise DuplicateAlias(kwargs, self.aliases, canonical_key) result[canonical_key] = self.translate_value(value) return result diff --git a/core/dbt/version.py b/core/dbt/version.py index 65b3a08c476..d668a902ae6 100644 --- a/core/dbt/version.py +++ b/core/dbt/version.py @@ -235,5 +235,5 @@ def _get_adapter_plugin_names() -> Iterator[str]: yield plugin_name -__version__ = "1.4.0a1" +__version__ = "1.4.0b1" installed = get_installed_version() diff --git a/core/setup.py b/core/setup.py index eaad87423c2..241a70ab6bb 100644 --- a/core/setup.py +++ b/core/setup.py @@ -25,7 +25,7 @@ package_name = "dbt-core" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """With dbt, data analysts and engineers can build analytics \ the way engineers build applications.""" @@ -50,11 +50,11 @@ "agate>=1.6,<1.6.4", "betterproto==1.2.5", "click>=7.0,<9", - "colorama>=0.3.9,<0.4.6", + "colorama>=0.3.9,<0.4.7", "hologram>=0.0.14,<=0.0.15", "isodate>=0.6,<0.7", "logbook>=1.5,<1.6", - "mashumaro[msgpack]==3.0.4", + "mashumaro[msgpack]==3.2", "minimal-snowplow-tracker==0.0.2", "networkx>=2.3,<2.8.1;python_version<'3.8'", "networkx>=2.3,<3;python_version>='3.8'", @@ -63,7 +63,7 @@ "dbt-extractor~=0.4.1", "typing-extensions>=3.7.4", "werkzeug>=1,<3", - "pathspec~=0.9.0", + "pathspec>=0.9,<0.11", # the following are all to match snowflake-connector-python "requests<3.0.0", "idna>=2.5,<4", @@ -81,6 +81,7 @@ "Programming Language :: Python :: 3.8", 
"Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], python_requires=">=3.7.2", ) diff --git a/docker/Dockerfile b/docker/Dockerfile index 8d3756ca786..72332c35de9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -14,12 +14,12 @@ FROM --platform=$build_for python:3.10.7-slim-bullseye as base # N.B. The refs updated automagically every release via bumpversion # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct -ARG dbt_core_ref=dbt-core@v1.4.0a1 -ARG dbt_postgres_ref=dbt-core@v1.4.0a1 -ARG dbt_redshift_ref=dbt-redshift@v1.4.0a1 -ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0a1 -ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0a1 -ARG dbt_spark_ref=dbt-spark@v1.4.0a1 +ARG dbt_core_ref=dbt-core@v1.4.0b1 +ARG dbt_postgres_ref=dbt-core@v1.4.0b1 +ARG dbt_redshift_ref=dbt-redshift@v1.4.0b1 +ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0b1 +ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0b1 +ARG dbt_spark_ref=dbt-spark@v1.4.0b1 # special case args ARG dbt_spark_version=all ARG dbt_third_party diff --git a/docker/README.md b/docker/README.md index 4adde533d37..7a48010b7d3 100644 --- a/docker/README.md +++ b/docker/README.md @@ -105,7 +105,7 @@ The `ENTRYPOINT` for this Dockerfile is the command `dbt` so you can bind-mount docker run \ --network=host --mount type=bind,source=path/to/project,target=/usr/app \ ---mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/ \ +--mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/profiles.yml \ my-dbt \ ls ``` diff --git a/plugins/postgres/dbt/adapters/postgres/__version__.py b/plugins/postgres/dbt/adapters/postgres/__version__.py index 70ba273f562..27cfeecd9e8 100644 --- a/plugins/postgres/dbt/adapters/postgres/__version__.py +++ b/plugins/postgres/dbt/adapters/postgres/__version__.py @@ -1 +1 @@ -version = "1.4.0a1" +version = "1.4.0b1" diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py index 3664e8d2a51..78b86234eae 100644 --- a/plugins/postgres/dbt/adapters/postgres/impl.py +++ b/plugins/postgres/dbt/adapters/postgres/impl.py @@ -8,7 +8,13 @@ from dbt.adapters.postgres import PostgresColumn from dbt.adapters.postgres import PostgresRelation from dbt.dataclass_schema import dbtClassMixin, ValidationError -import dbt.exceptions +from dbt.exceptions import ( + CrossDbReferenceProhibited, + IndexConfigNotDict, + InvalidIndexConfig, + RuntimeException, + UnexpectedDbReference, +) import dbt.utils @@ -40,14 +46,9 @@ def parse(cls, raw_index) -> Optional["PostgresIndexConfig"]: cls.validate(raw_index) return cls.from_dict(raw_index) except ValidationError as exc: - msg = dbt.exceptions.validator_error_message(exc) - dbt.exceptions.raise_compiler_error(f"Could not parse index config: {msg}") + raise InvalidIndexConfig(exc) except TypeError: - dbt.exceptions.raise_compiler_error( - f"Invalid index config:\n" - f" Got: {raw_index}\n" - f' Expected a dictionary with at minimum a "columns" key' - ) + raise IndexConfigNotDict(raw_index) @dataclass @@ -73,11 +74,7 @@ def verify_database(self, database): database = database.strip('"') expected = self.config.credentials.database if database.lower() != expected.lower(): - raise dbt.exceptions.NotImplementedException( - "Cross-db references not allowed in {} ({} vs {})".format( - self.type(), database, expected - ) - ) + raise UnexpectedDbReference(self.type(), database, expected) # return an empty string on success so macros can call this 
return "" @@ -110,12 +107,8 @@ def _get_catalog_schemas(self, manifest): schemas = super()._get_catalog_schemas(manifest) try: return schemas.flatten() - except dbt.exceptions.RuntimeException as exc: - dbt.exceptions.raise_compiler_error( - "Cross-db references not allowed in adapter {}: Got {}".format( - self.type(), exc.msg - ) - ) + except RuntimeException as exc: + raise CrossDbReferenceProhibited(self.type(), exc.msg) def _link_cached_relations(self, manifest): schemas: Set[str] = set() diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py index 6b76e5cc375..00a91759aec 100644 --- a/plugins/postgres/setup.py +++ b/plugins/postgres/setup.py @@ -41,7 +41,7 @@ def _dbt_psycopg2_name(): package_name = "dbt-postgres" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """The postgres adapter plugin for dbt (data build tool)""" this_directory = os.path.abspath(os.path.dirname(__file__)) @@ -83,6 +83,7 @@ def _dbt_psycopg2_name(): "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], python_requires=">=3.7", ) diff --git a/schemas/dbt/manifest/v8.json b/schemas/dbt/manifest/v8.json new file mode 100644 index 00000000000..d92dc46b79c --- /dev/null +++ b/schemas/dbt/manifest/v8.json @@ -0,0 +1,4362 @@ +{ + "type": "object", + "required": [ + "metadata", + "nodes", + "sources", + "macros", + "docs", + "exposures", + "metrics", + "selectors" + ], + "properties": { + "metadata": { + "$ref": "#/definitions/ManifestMetadata", + "description": "Metadata about the manifest" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/AnalysisNode" + }, + { + "$ref": "#/definitions/SingularTestNode" + }, + { + "$ref": "#/definitions/HookNode" + }, + { + "$ref": "#/definitions/ModelNode" + }, + { + "$ref": "#/definitions/RPCNode" + }, + { + "$ref": "#/definitions/SqlNode" + }, + { + "$ref": "#/definitions/GenericTestNode" + }, + { + "$ref": "#/definitions/SnapshotNode" + }, + { + "$ref": "#/definitions/SeedNode" + } + ] + }, + "description": "The nodes defined in the dbt project and its dependencies" + }, + "sources": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/SourceDefinition" + }, + "description": "The sources defined in the dbt project and its dependencies" + }, + "macros": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Macro" + }, + "description": "The macros defined in the dbt project and its dependencies" + }, + "docs": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Documentation" + }, + "description": "The docs defined in the dbt project and its dependencies" + }, + "exposures": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Exposure" + }, + "description": "The exposures defined in the dbt project and its dependencies" + }, + "metrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Metric" + }, + "description": "The metrics defined in the dbt project and its dependencies" + }, + "selectors": { + "type": "object", + "description": "The selectors defined in selectors.yml" + }, + "disabled": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/AnalysisNode" + }, + { + "$ref": "#/definitions/SingularTestNode" + }, + { + "$ref": "#/definitions/HookNode" + }, + { + "$ref": 
"#/definitions/ModelNode" + }, + { + "$ref": "#/definitions/RPCNode" + }, + { + "$ref": "#/definitions/SqlNode" + }, + { + "$ref": "#/definitions/GenericTestNode" + }, + { + "$ref": "#/definitions/SnapshotNode" + }, + { + "$ref": "#/definitions/SeedNode" + }, + { + "$ref": "#/definitions/SourceDefinition" + } + ] + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping of the disabled nodes in the target" + }, + "parent_map": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping from\u00a0child nodes to their dependencies" + }, + "child_map": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping from parent nodes to their dependents" + } + }, + "additionalProperties": false, + "description": "WritableManifest(metadata: dbt.contracts.graph.manifest.ManifestMetadata, nodes: Mapping[str, Union[dbt.contracts.graph.nodes.AnalysisNode, dbt.contracts.graph.nodes.SingularTestNode, dbt.contracts.graph.nodes.HookNode, dbt.contracts.graph.nodes.ModelNode, dbt.contracts.graph.nodes.RPCNode, dbt.contracts.graph.nodes.SqlNode, dbt.contracts.graph.nodes.GenericTestNode, dbt.contracts.graph.nodes.SnapshotNode, dbt.contracts.graph.nodes.SeedNode]], sources: Mapping[str, dbt.contracts.graph.nodes.SourceDefinition], macros: Mapping[str, dbt.contracts.graph.nodes.Macro], docs: Mapping[str, dbt.contracts.graph.nodes.Documentation], exposures: Mapping[str, dbt.contracts.graph.nodes.Exposure], metrics: Mapping[str, dbt.contracts.graph.nodes.Metric], selectors: Mapping[str, Any], disabled: Optional[Mapping[str, List[Union[dbt.contracts.graph.nodes.AnalysisNode, dbt.contracts.graph.nodes.SingularTestNode, dbt.contracts.graph.nodes.HookNode, dbt.contracts.graph.nodes.ModelNode, dbt.contracts.graph.nodes.RPCNode, dbt.contracts.graph.nodes.SqlNode, dbt.contracts.graph.nodes.GenericTestNode, dbt.contracts.graph.nodes.SnapshotNode, dbt.contracts.graph.nodes.SeedNode, dbt.contracts.graph.nodes.SourceDefinition]]]], parent_map: Optional[Dict[str, List[str]]], child_map: Optional[Dict[str, List[str]]])", + "definitions": { + "ManifestMetadata": { + "type": "object", + "required": [], + "properties": { + "dbt_schema_version": { + "type": "string", + "default": "https://schemas.getdbt.com/dbt/manifest/v8.json" + }, + "dbt_version": { + "type": "string", + "default": "1.4.0a1" + }, + "generated_at": { + "type": "string", + "format": "date-time", + "default": "2022-12-13T03:30:15.966964Z" + }, + "invocation_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "4f2b967b-7e02-46de-a7ea-268a05e3fab1" + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "default": {} + }, + "project_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A unique identifier for the project" + }, + "user_id": { + "oneOf": [ + { + "type": "string", + "pattern": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + { + "type": "null" + } + ], + "description": "A unique identifier for the user" + }, + "send_anonymous_usage_stats": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "Whether dbt is configured to send anonymous usage statistics" + }, + "adapter_type": { + "oneOf": [ + { + "type": "string" + 
}, + { + "type": "null" + } + ], + "description": "The type name of the adapter" + } + }, + "additionalProperties": false, + "description": "Metadata for the manifest." + }, + "AnalysisNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "analysis" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.970579 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "AnalysisNode(database: str, schema: str, 
name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "FileHash": { + "type": "object", + "required": [ + "name", + "checksum" + ], + "properties": { + "name": { + "type": "string" + }, + "checksum": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "FileHash(name: str, checksum: str)" + }, + "NodeConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "view" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + } + }, + "additionalProperties": true, + "description": "NodeConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'view', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: 
List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = )" + }, + "Hook": { + "type": "object", + "required": [ + "sql" + ], + "properties": { + "sql": { + "type": "string" + }, + "transaction": { + "type": "boolean", + "default": true + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Hook(sql: str, transaction: bool = True, index: Optional[int] = None)" + }, + "Docs": { + "type": "object", + "required": [], + "properties": { + "show": { + "type": "boolean", + "default": true + }, + "node_color": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Docs(show: bool = True, node_color: Optional[str] = None)" + }, + "ColumnInfo": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + }, + "data_type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "quote": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": true, + "description": "Used in all ManifestNodes and SourceDefinition" + }, + "DependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "nodes": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "DependsOn(macros: List[str] = , nodes: List[str] = )" + }, + "InjectedCTE": { + "type": "object", + "required": [ + "id", + "sql" + ], + "properties": { + "id": { + "type": "string" + }, + "sql": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "Used in CompiledNodes as part of ephemeral model processing" + }, + "SingularTestNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": 
"array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.973521 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "SingularTestNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "TestConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "dbt_test__audit" + }, + 
"database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "test" + }, + "severity": { + "type": "string", + "pattern": "^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$", + "default": "ERROR" + }, + "store_failures": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "where": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "limit": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "fail_calc": { + "type": "string", + "default": "count(*)" + }, + "warn_if": { + "type": "string", + "default": "!= 0" + }, + "error_if": { + "type": "string", + "default": "!= 0" + } + }, + "additionalProperties": true, + "description": "TestConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = 'dbt_test__audit', database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'test', severity: dbt.contracts.graph.model_config.Severity = 'ERROR', store_failures: Optional[bool] = None, where: Optional[str] = None, limit: Optional[int] = None, fail_calc: str = 'count(*)', warn_if: str = '!= 0', error_if: str = '!= 0')" + }, + "HookNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "operation" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.975156 + }, + 
"config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "HookNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None, index: Optional[int] = None)" + }, + "ModelNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "model" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + 
"full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.976732 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "ModelNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "RPCNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + 
"path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "rpc" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.978195 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "RPCNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: 
str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "SqlNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "sql operation" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.979718 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": 
"#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "SqlNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "GenericTestNode": { + "type": "object", + "required": [ + "test_metadata", + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "test_metadata": { + "$ref": "#/definitions/TestMetadata" + }, + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { 
+ "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.981434 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "column_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "file_key_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "GenericTestNode(test_metadata: dbt.contracts.graph.nodes.TestMetadata, database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None, column_name: Optional[str] = None, file_key_name: Optional[str] = None)" + }, + "TestMetadata": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "kwargs": { + "type": "object", + "default": {} + }, + "namespace": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "TestMetadata(name: str, kwargs: Dict[str, Any] = , namespace: Optional[str] = None)" + }, + "SnapshotNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum", + "config" + ], + 
"properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "snapshot" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SnapshotConfig" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.984685 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "SnapshotNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SnapshotConfig, _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: 
dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "SnapshotConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "snapshot" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "updated_at": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "check_cols": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "SnapshotConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'snapshot', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Optional[str] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , strategy: Optional[str] = None, target_schema: Optional[str] = None, target_database: Optional[str] = None, updated_at: Optional[str] = None, check_cols: Union[str, List[str], NoneType] = None)" + }, + "SeedNode": { + "type": "object", + "required": [ 
+ "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "seed" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SeedConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "seed", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "quote_columns": null, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.987447 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "root_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "SeedNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', root_path: Optional[str] = None)" + }, + "SeedConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + 
"type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "seed" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "quote_columns": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "SeedConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'seed', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , quote_columns: Optional[bool] = None)" + }, + "SourceDefinition": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "source_name", + "source_description", + "loader", + "identifier" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "source" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "source_name": { + "type": "string" + }, + "source_description": { + "type": "string" + }, + "loader": { + "type": "string" + }, + "identifier": { + "type": "string" + }, + "quoting": { + "$ref": "#/definitions/Quoting", + "default": { + "database": null, + "schema": null, + "identifier": null, + "column": null + } + }, + "loaded_at_field": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "freshness": { + "oneOf": [ + { + "$ref": "#/definitions/FreshnessThreshold" + }, + { + "type": 
"null" + } + ] + }, + "external": { + "oneOf": [ + { + "$ref": "#/definitions/ExternalTable" + }, + { + "type": "null" + } + ] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "source_meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/SourceConfig", + "default": { + "enabled": true + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "created_at": { + "type": "number", + "default": 1670902215.989922 + } + }, + "additionalProperties": false, + "description": "SourceDefinition(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], source_name: str, source_description: str, loader: str, identifier: str, _event_status: Dict[str, Any] = , quoting: dbt.contracts.graph.unparsed.Quoting = , loaded_at_field: Optional[str] = None, freshness: Optional[dbt.contracts.graph.unparsed.FreshnessThreshold] = None, external: Optional[dbt.contracts.graph.unparsed.ExternalTable] = None, description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , source_meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.SourceConfig = , patch_path: Optional[str] = None, unrendered_config: Dict[str, Any] = , relation_name: Optional[str] = None, created_at: float = )" + }, + "Quoting": { + "type": "object", + "required": [], + "properties": { + "database": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "identifier": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "column": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Quoting(database: Optional[bool] = None, schema: Optional[bool] = None, identifier: Optional[bool] = None, column: Optional[bool] = None)" + }, + "FreshnessThreshold": { + "type": "object", + "required": [], + "properties": { + "warn_after": { + "oneOf": [ + { + "$ref": "#/definitions/Time" + }, + { + "type": "null" + } + ], + "default": { + "count": null, + "period": null + } + }, + "error_after": { + "oneOf": [ + { + "$ref": "#/definitions/Time" + }, + { + "type": "null" + } + ], + "default": { + "count": null, + "period": null + } + }, + "filter": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "FreshnessThreshold(warn_after: Optional[dbt.contracts.graph.unparsed.Time] = , error_after: Optional[dbt.contracts.graph.unparsed.Time] = , filter: Optional[str] = None)" + }, + "FreshnessMetadata": { + "type": "object", + "required": [], + "properties": { + "dbt_schema_version": { + "type": "string", + "default": "https://schemas.getdbt.com/dbt/sources/v3.json" + }, + "dbt_version": { + "type": "string", + "default": "1.4.0a1" + }, + "generated_at": { + "type": "string", + "format": 
"date-time", + "default": "2022-12-13T03:30:15.961825Z" + }, + "invocation_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "4f2b967b-7e02-46de-a7ea-268a05e3fab1" + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "default": {} + } + }, + "additionalProperties": false, + "description": "FreshnessMetadata(dbt_schema_version: str = , dbt_version: str = '1.4.0a1', generated_at: datetime.datetime = , invocation_id: Optional[str] = , env: Dict[str, str] = )" + }, + "SourceFreshnessRuntimeError": { + "type": "object", + "required": [ + "unique_id", + "status" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "error": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "status": { + "type": "string", + "enum": [ + "runtime error" + ] + } + }, + "additionalProperties": false, + "description": "SourceFreshnessRuntimeError(unique_id: str, error: Union[str, int, NoneType], status: dbt.contracts.results.FreshnessErrorEnum)" + }, + "SourceFreshnessOutput": { + "type": "object", + "required": [ + "unique_id", + "max_loaded_at", + "snapshotted_at", + "max_loaded_at_time_ago_in_s", + "status", + "criteria", + "adapter_response", + "timing", + "thread_id", + "execution_time" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "max_loaded_at": { + "type": "string", + "format": "date-time" + }, + "snapshotted_at": { + "type": "string", + "format": "date-time" + }, + "max_loaded_at_time_ago_in_s": { + "type": "number" + }, + "status": { + "type": "string", + "enum": [ + "pass", + "warn", + "error", + "runtime error" + ] + }, + "criteria": { + "$ref": "#/definitions/FreshnessThreshold" + }, + "adapter_response": { + "type": "object" + }, + "timing": { + "type": "array", + "items": { + "$ref": "#/definitions/TimingInfo" + } + }, + "thread_id": { + "type": "string" + }, + "execution_time": { + "type": "number" + } + }, + "additionalProperties": false, + "description": "SourceFreshnessOutput(unique_id: str, max_loaded_at: datetime.datetime, snapshotted_at: datetime.datetime, max_loaded_at_time_ago_in_s: float, status: dbt.contracts.results.FreshnessStatus, criteria: dbt.contracts.graph.unparsed.FreshnessThreshold, adapter_response: Dict[str, Any], timing: List[dbt.contracts.results.TimingInfo], thread_id: str, execution_time: float)" + }, + "Time": { + "type": "object", + "required": [], + "properties": { + "count": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "enum": [ + "minute", + "hour", + "day" + ] + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Time(count: Optional[int] = None, period: Optional[dbt.contracts.graph.unparsed.TimePeriod] = None)" + }, + "TimingInfo": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "started_at": { + "oneOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ] + }, + "completed_at": { + "oneOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "TimingInfo(name: str, started_at: Optional[datetime.datetime] = None, completed_at: Optional[datetime.datetime] = None)" + }, + "ExternalTable": { + "type": "object", + "required": [], + "properties": { + "location": { + "oneOf": [ + { + "type": "string" + }, + { 
+ "type": "null" + } + ] + }, + "file_format": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "row_format": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tbl_properties": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "partitions": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/ExternalPartition" + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "ExternalTable(_extra: Dict[str, Any] = , location: Optional[str] = None, file_format: Optional[str] = None, row_format: Optional[str] = None, tbl_properties: Optional[str] = None, partitions: Union[List[str], List[dbt.contracts.graph.unparsed.ExternalPartition], NoneType] = None)" + }, + "ExternalPartition": { + "type": "object", + "required": [], + "properties": { + "name": { + "type": "string", + "default": "" + }, + "description": { + "type": "string", + "default": "" + }, + "data_type": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + } + }, + "additionalProperties": true, + "description": "ExternalPartition(_extra: Dict[str, Any] = , name: str = '', description: str = '', data_type: str = '', meta: Dict[str, Any] = )" + }, + "SourceConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "SourceConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + }, + "Macro": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "macro_sql" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "macro" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "macro_sql": { + "type": "string" + }, + "depends_on": { + "$ref": "#/definitions/MacroDependsOn", + "default": { + "macros": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "arguments": { + "type": "array", + "items": { + "$ref": "#/definitions/MacroArgument" + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1670902215.990816 + }, + "supported_languages": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string", + "enum": [ + "python", + "sql" + ] + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Macro(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, macro_sql: str, depends_on: dbt.contracts.graph.nodes.MacroDependsOn = , description: str = '', meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, arguments: List[dbt.contracts.graph.unparsed.MacroArgument] = , created_at: float = , supported_languages: Optional[List[dbt.node_types.ModelLanguage]] = None)" + }, + "MacroDependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": 
"array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "Used only in the Macro class" + }, + "MacroArgument": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "description": { + "type": "string", + "default": "" + } + }, + "additionalProperties": false, + "description": "MacroArgument(name: str, type: Optional[str] = None, description: str = '')" + }, + "Documentation": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "block_contents" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "doc" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "block_contents": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "Documentation(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, block_contents: str)" + }, + "Exposure": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "type", + "owner" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "exposure" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "dashboard", + "notebook", + "analysis", + "ml", + "application" + ] + }, + "owner": { + "$ref": "#/definitions/ExposureOwner" + }, + "description": { + "type": "string", + "default": "" + }, + "label": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "maturity": { + "oneOf": [ + { + "type": "string", + "enum": [ + "low", + "medium", + "high" + ] + }, + { + "type": "null" + } + ] + }, + "meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/ExposureConfig", + "default": { + "enabled": true + } + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "url": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1670902215.993354 + } + }, + "additionalProperties": false, + "description": "Exposure(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], type: dbt.contracts.graph.unparsed.ExposureType, owner: 
dbt.contracts.graph.unparsed.ExposureOwner, description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.nodes.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + }, + "ExposureOwner": { + "type": "object", + "required": [ + "email" + ], + "properties": { + "email": { + "type": "string" + }, + "name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ExposureOwner(email: str, name: Optional[str] = None)" + }, + "ExposureConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "ExposureConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + }, + "Metric": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "description", + "label", + "calculation_method", + "timestamp", + "expression", + "filters", + "time_grains", + "dimensions" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "metric" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "type": "string" + }, + "label": { + "type": "string" + }, + "calculation_method": { + "type": "string" + }, + "timestamp": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "filters": { + "type": "array", + "items": { + "$ref": "#/definitions/MetricFilter" + } + }, + "time_grains": { + "type": "array", + "items": { + "type": "string" + } + }, + "dimensions": { + "type": "array", + "items": { + "type": "string" + } + }, + "window": { + "oneOf": [ + { + "$ref": "#/definitions/MetricTime" + }, + { + "type": "null" + } + ] + }, + "model": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "model_unique_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/MetricConfig", + "default": { + "enabled": true + } + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1670902215.995033 + } + }, + "additionalProperties": false, + "description": "Metric(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: 
str, fqn: List[str], description: str, label: str, calculation_method: str, timestamp: str, expression: str, filters: List[dbt.contracts.graph.unparsed.MetricFilter], time_grains: List[str], dimensions: List[str], window: Optional[dbt.contracts.graph.unparsed.MetricTime] = None, model: Optional[str] = None, model_unique_id: Optional[str] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.MetricConfig = , unrendered_config: Dict[str, Any] = , sources: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , refs: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + }, + "MetricFilter": { + "type": "object", + "required": [ + "field", + "operator", + "value" + ], + "properties": { + "field": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "MetricFilter(field: str, operator: str, value: str)" + }, + "MetricTime": { + "type": "object", + "required": [], + "properties": { + "count": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "enum": [ + "day", + "week", + "month", + "year" + ] + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "MetricTime(count: Optional[int] = None, period: Optional[dbt.contracts.graph.unparsed.MetricTimePeriod] = None)" + }, + "MetricConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "MetricConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://schemas.getdbt.com/dbt/manifest/v8.json" +} diff --git a/test/integration/023_exit_codes_tests/models/bad.sql b/test/integration/023_exit_codes_tests/models/bad.sql deleted file mode 100644 index dad7fe5fc10..00000000000 --- a/test/integration/023_exit_codes_tests/models/bad.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select bad sql here diff --git a/test/integration/023_exit_codes_tests/models/dupe.sql b/test/integration/023_exit_codes_tests/models/dupe.sql deleted file mode 100644 index f7bb37c8b71..00000000000 --- a/test/integration/023_exit_codes_tests/models/dupe.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select 1 as id, current_date as updated_at -union all -select 2 as id, current_date as updated_at -union all -select 3 as id, current_date as updated_at -union all -select 4 as id, current_date as updated_at diff --git a/test/integration/023_exit_codes_tests/models/good.sql b/test/integration/023_exit_codes_tests/models/good.sql deleted file mode 100644 index f7bb37c8b71..00000000000 --- a/test/integration/023_exit_codes_tests/models/good.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select 1 as id, current_date as updated_at -union all -select 2 as id, current_date as updated_at -union all -select 3 as id, current_date as updated_at -union all -select 4 as id, current_date as updated_at diff --git a/test/integration/023_exit_codes_tests/models/schema.yml b/test/integration/023_exit_codes_tests/models/schema.yml deleted file mode 100644 index f7243286b7b..00000000000 --- a/test/integration/023_exit_codes_tests/models/schema.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 -models: -- name: good - columns: - - name: updated_at - tests: - - not_null -- name: bad - columns: - - name: updated_at - tests: - - not_null -- name: dupe - 
columns: - - name: updated_at - tests: - - unique diff --git a/test/integration/023_exit_codes_tests/seeds-bad/data.csv b/test/integration/023_exit_codes_tests/seeds-bad/data.csv deleted file mode 100644 index fcc8e001bbd..00000000000 --- a/test/integration/023_exit_codes_tests/seeds-bad/data.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,\2,3,a,a,a diff --git a/test/integration/023_exit_codes_tests/seeds-good/data.csv b/test/integration/023_exit_codes_tests/seeds-good/data.csv deleted file mode 100644 index bfde6bfa0b8..00000000000 --- a/test/integration/023_exit_codes_tests/seeds-good/data.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,2,3 diff --git a/test/integration/023_exit_codes_tests/snapshots-bad/b.sql b/test/integration/023_exit_codes_tests/snapshots-bad/b.sql deleted file mode 100644 index 52425b7c9bc..00000000000 --- a/test/integration/023_exit_codes_tests/snapshots-bad/b.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot good_snapshot %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}} - select * from {{ schema }}.good -{% endsnapshot %} diff --git a/test/integration/023_exit_codes_tests/snapshots-good/g.sql b/test/integration/023_exit_codes_tests/snapshots-good/g.sql deleted file mode 100644 index 0c1205d9441..00000000000 --- a/test/integration/023_exit_codes_tests/snapshots-good/g.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot good_snapshot %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} - select * from {{ schema }}.good -{% endsnapshot %} diff --git a/test/integration/023_exit_codes_tests/test_exit_codes.py b/test/integration/023_exit_codes_tests/test_exit_codes.py deleted file mode 100644 index 7da8d85e321..00000000000 --- a/test/integration/023_exit_codes_tests/test_exit_codes.py +++ /dev/null @@ -1,200 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -import dbt.exceptions - - -class TestExitCodes(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "snapshot-paths": ['snapshots-good'], - } - - @use_profile('postgres') - def test_postgres_exit_code_run_succeed(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - self.assertTableDoesExist('good') - - @use_profile('postgres') - def test__postgres_exit_code_run_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'bad']) - self.assertEqual(len(results.results), 1) - self.assertFalse(success) - self.assertTableDoesNotExist('bad') - - @use_profile('postgres') - def test__postgres_schema_test_pass(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - results, success = self.run_dbt_and_check(['test', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - - @use_profile('postgres') - def test__postgres_schema_test_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'dupe']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - results, success = self.run_dbt_and_check(['test', '--model', 'dupe']) - self.assertEqual(len(results.results), 1) - self.assertFalse(success) - - 
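These deleted exit-code tests follow the legacy DBTIntegrationTest pattern, in which run_dbt_and_check returns a (results, success) pair and the caller asserts on both. For comparison, here is a minimal sketch (not part of this patch) of the same assertions in dbt's newer pytest-based functional framework, assuming the dbt.tests.util.run_dbt helper; the model names and SQL bodies are illustrative:

    import pytest
    from dbt.tests.util import run_dbt

    # conftest.py must enable dbt's fixtures, e.g.:
    # pytest_plugins = ["dbt.tests.fixtures.project"]

    GOOD_SQL = "select 1 as id, current_date as updated_at"
    BAD_SQL = "select bad sql here"

    class TestExitCodesSketch:
        @pytest.fixture(scope="class")
        def models(self):
            # One model that runs cleanly and one that fails at run time.
            return {"good.sql": GOOD_SQL, "bad.sql": BAD_SQL}

        def test_run_succeeds(self, project):
            results = run_dbt(["run", "--select", "good"])
            assert len(results) == 1

        def test_run_fails(self, project):
            # expect_pass=False makes run_dbt itself assert that the
            # invocation failed, so no separate `success` flag is needed.
            results = run_dbt(["run", "--select", "bad"], expect_pass=False)
            assert len(results) == 1

Folding the exit-status assertion into the helper keeps each test to a single call, which is why the (results, success) tuple disappears in the newer style.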
@use_profile('postgres') - def test__postgres_compile(self): - results, success = self.run_dbt_and_check(['compile']) - self.assertEqual(len(results.results), 7) - self.assertTrue(success) - - @use_profile('postgres') - def test__postgres_snapshot_pass(self): - self.run_dbt_and_check(['run', '--model', 'good']) - results, success = self.run_dbt_and_check(['snapshot']) - self.assertEqual(len(results.results), 1) - self.assertTableDoesExist('good_snapshot') - self.assertTrue(success) - - -class TestExitCodesSnapshotFail(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "snapshot-paths": ['snapshots-bad'], - } - - @use_profile('postgres') - def test__postgres_snapshot_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertTrue(success) - self.assertEqual(len(results.results), 1) - - results, success = self.run_dbt_and_check(['snapshot']) - self.assertEqual(len(results.results), 1) - self.assertTableDoesNotExist('good_snapshot') - self.assertFalse(success) - -class TestExitCodesDeps(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def packages_config(self): - return { - "packages": [ - { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'dbt/1.0.0', - } - ] - } - - @use_profile('postgres') - def test_postgres_deps(self): - _, success = self.run_dbt_and_check(['deps']) - self.assertTrue(success) - - -class TestExitCodesDepsFail(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def packages_config(self): - return { - "packages": [ - { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'bad-branch', - }, - ] - } - - @use_profile('postgres') - def test_postgres_deps(self): - with self.assertRaises(dbt.exceptions.InternalException): - # this should fail - self.run_dbt_and_check(['deps']) - - -class TestExitCodesSeed(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds-good'], - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_seed(self): - results, success = self.run_dbt_and_check(['seed']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - - -class TestExitCodesSeedFail(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds-bad'], - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_seed(self): - _, success = self.run_dbt_and_check(['seed']) - self.assertFalse(success) diff --git a/test/integration/030_statement_tests/models/statement_actual.sql b/test/integration/030_statement_tests/models/statement_actual.sql deleted file mode 100644 index 8c550bc5dc1..00000000000 --- a/test/integration/030_statement_tests/models/statement_actual.sql +++ /dev/null @@ -1,23 +0,0 @@ - --- {{ ref('seed') }} - -{%- call statement('test_statement', fetch_result=True) -%} - - select - 
count(*) as "num_records" - - from {{ ref('seed') }} - -{%- endcall -%} - -{% set result = load_result('test_statement') %} - -{% set res_table = result['table'] %} -{% set res_matrix = result['data'] %} - -{% set matrix_value = res_matrix[0][0] %} -{% set table_value = res_table[0]['num_records'] %} - -select 'matrix' as source, {{ matrix_value }} as value -union all -select 'table' as source, {{ table_value }} as value diff --git a/test/integration/030_statement_tests/seed/statement_expected.csv b/test/integration/030_statement_tests/seed/statement_expected.csv deleted file mode 100644 index cf9d9af15ac..00000000000 --- a/test/integration/030_statement_tests/seed/statement_expected.csv +++ /dev/null @@ -1,3 +0,0 @@ -source,value -matrix,100 -table,100 diff --git a/test/integration/030_statement_tests/test_statements.py b/test/integration/030_statement_tests/test_statements.py deleted file mode 100644 index 4278f394580..00000000000 --- a/test/integration/030_statement_tests/test_statements.py +++ /dev/null @@ -1,36 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestStatements(DBTIntegrationTest): - - @property - def schema(self): - return "statements_030" - - @staticmethod - def dir(path): - return path.lstrip("/") - - @property - def models(self): - return self.dir("models") - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - } - } - - @use_profile("postgres") - def test_postgres_statements(self): - self.use_default_project({"seed-paths": [self.dir("seed")]}) - - results = self.run_dbt(["seed"]) - self.assertEqual(len(results), 2) - results = self.run_dbt() - self.assertEqual(len(results), 1) - - self.assertTablesEqual("statement_actual", "statement_expected") diff --git a/test/integration/031_thread_count_tests/models/.gitkeep b/test/integration/031_thread_count_tests/models/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/031_thread_count_tests/models/do_nothing_1.sql b/test/integration/031_thread_count_tests/models/do_nothing_1.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_1.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_10.sql b/test/integration/031_thread_count_tests/models/do_nothing_10.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_10.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_11.sql b/test/integration/031_thread_count_tests/models/do_nothing_11.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_11.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_12.sql b/test/integration/031_thread_count_tests/models/do_nothing_12.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_12.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_13.sql 
b/test/integration/031_thread_count_tests/models/do_nothing_13.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_13.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_14.sql b/test/integration/031_thread_count_tests/models/do_nothing_14.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_14.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_15.sql b/test/integration/031_thread_count_tests/models/do_nothing_15.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_15.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_16.sql b/test/integration/031_thread_count_tests/models/do_nothing_16.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_16.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_17.sql b/test/integration/031_thread_count_tests/models/do_nothing_17.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_17.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_18.sql b/test/integration/031_thread_count_tests/models/do_nothing_18.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_18.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_19.sql b/test/integration/031_thread_count_tests/models/do_nothing_19.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_19.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_2.sql b/test/integration/031_thread_count_tests/models/do_nothing_2.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_2.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_20.sql b/test/integration/031_thread_count_tests/models/do_nothing_20.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_20.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_3.sql b/test/integration/031_thread_count_tests/models/do_nothing_3.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_3.sql +++ /dev/null @@ -1 +0,0 @@ -with x as 
(select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_4.sql b/test/integration/031_thread_count_tests/models/do_nothing_4.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_4.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_5.sql b/test/integration/031_thread_count_tests/models/do_nothing_5.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_5.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_6.sql b/test/integration/031_thread_count_tests/models/do_nothing_6.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_6.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_7.sql b/test/integration/031_thread_count_tests/models/do_nothing_7.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_7.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_8.sql b/test/integration/031_thread_count_tests/models/do_nothing_8.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_8.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_9.sql b/test/integration/031_thread_count_tests/models/do_nothing_9.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_9.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/test_thread_count.py b/test/integration/031_thread_count_tests/test_thread_count.py deleted file mode 100644 index 042e2cd8a94..00000000000 --- a/test/integration/031_thread_count_tests/test_thread_count.py +++ /dev/null @@ -1,28 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestThreadCount(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def profile_config(self): - return { - 'threads': 2, - } - - @property - def schema(self): - return "thread_tests_031" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_threading_8x(self): - results = self.run_dbt(args=['run', '--threads', '16']) - self.assertTrue(len(results), 20) diff --git a/test/integration/044_run_operations_tests/macros/sad_macros.sql b/test/integration/044_run_operations_tests/macros/sad_macros.sql deleted file mode 100644 index 4f2c80bc40f..00000000000 --- a/test/integration/044_run_operations_tests/macros/sad_macros.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro syntax_error() %} - {% if execute %} - {% call statement() %} - select NOPE NOT A VALID QUERY - {% endcall %} - {% endif %} -{% endmacro %} diff --git 
a/test/integration/044_run_operations_tests/models/model.sql b/test/integration/044_run_operations_tests/models/model.sql deleted file mode 100644 index 43258a71464..00000000000 --- a/test/integration/044_run_operations_tests/models/model.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as id diff --git a/test/integration/044_run_operations_tests/test_run_operations.py b/test/integration/044_run_operations_tests/test_run_operations.py deleted file mode 100644 index d0308abe9b9..00000000000 --- a/test/integration/044_run_operations_tests/test_run_operations.py +++ /dev/null @@ -1,76 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import yaml - - -class TestOperations(DBTIntegrationTest): - @property - def schema(self): - return "run_operations_044" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "macro-paths": ['macros'], - } - - def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs): - args = ['run-operation', macro] - if kwargs: - args.extend(('--args', yaml.safe_dump(kwargs))) - if extra_args: - args.extend(extra_args) - return self.run_dbt(args, expect_pass=expect_pass) - - @use_profile('postgres') - def test__postgres_macro_noargs(self): - self.run_operation('no_args') - self.assertTableDoesExist('no_args') - - @use_profile('postgres') - def test__postgres_macro_args(self): - self.run_operation('table_name_args', table_name='my_fancy_table') - self.assertTableDoesExist('my_fancy_table') - - @use_profile('postgres') - def test__postgres_macro_exception(self): - self.run_operation('syntax_error', False) - - @use_profile('postgres') - def test__postgres_macro_missing(self): - self.run_operation('this_macro_does_not_exist', False) - - @use_profile('postgres') - def test__postgres_cannot_connect(self): - self.run_operation('no_args', - extra_args=['--target', 'noaccess'], - expect_pass=False) - - @use_profile('postgres') - def test__postgres_vacuum(self): - self.run_dbt(['run']) - # this should succeed - self.run_operation('vacuum', table_name='model') - - @use_profile('postgres') - def test__postgres_vacuum_ref(self): - self.run_dbt(['run']) - # this should succeed - self.run_operation('vacuum_ref', ref_target='model') - - @use_profile('postgres') - def test__postgres_select(self): - self.run_operation('select_something', name='world') - - @use_profile('postgres') - def test__postgres_access_graph(self): - self.run_operation('log_graph') - - @use_profile('postgres') - def test__postgres_print(self): - # Tests that calling the `print()` macro does not cause an exception - self.run_operation('print_something') diff --git a/test/integration/049_dbt_debug_tests/models/model.sql b/test/integration/049_dbt_debug_tests/models/model.sql deleted file mode 100644 index 2c2d9c8de90..00000000000 --- a/test/integration/049_dbt_debug_tests/models/model.sql +++ /dev/null @@ -1 +0,0 @@ -seled 1 as id diff --git a/test/integration/049_dbt_debug_tests/test_debug.py b/test/integration/049_dbt_debug_tests/test_debug.py deleted file mode 100644 index 8a5fbd774f3..00000000000 --- a/test/integration/049_dbt_debug_tests/test_debug.py +++ /dev/null @@ -1,158 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import re -import yaml - -import pytest - - -class TestDebug(DBTIntegrationTest): - @property - def schema(self): - return 'dbt_debug_049' - - @staticmethod - def dir(value): - return os.path.normpath(value) - - @property - def models(self): - return 
self.dir('models') - - def postgres_profile(self): - profile = super(TestDebug, self).postgres_profile() - profile['test']['outputs'].update({ - 'nopass': { - 'type': 'postgres', - 'threads': 4, - 'host': self.database_host, - 'port': 5432, - 'user': 'root', - # 'pass': 'password', - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - 'wronguser': { - 'type': 'postgres', - 'threads': 4, - 'host': self.database_host, - 'port': 5432, - 'user': 'notmyuser', - 'pass': 'notmypassword', - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - 'none_target': None - }) - return profile - - @pytest.fixture(autouse=True) - def capsys(self, capsys): - self.capsys = capsys - - def assertGotValue(self, linepat, result): - found = False - output = self.capsys.readouterr().out - for line in output.split('\n'): - if linepat.match(line): - found = True - self.assertIn(result, line, 'result "{}" not found in "{}" line'.format(result, linepat)) - self.assertTrue(found, 'linepat {} not found in stdout: {}'.format(linepat, output)) - - @use_profile('postgres') - def test_postgres_ok(self): - self.run_dbt(['debug']) - self.assertNotIn('ERROR', self.capsys.readouterr().out) - - @use_profile('postgres') - def test_postgres_nopass(self): - self.run_dbt(['debug', '--target', 'nopass'], expect_pass=False) - self.assertGotValue(re.compile(r'\s+profiles\.yml file'), 'ERROR invalid') - - @use_profile('postgres') - def test_postgres_wronguser(self): - self.run_dbt(['debug', '--target', 'wronguser'], expect_pass=False) - self.assertGotValue(re.compile(r'\s+Connection test'), 'ERROR') - - @use_profile('postgres') - def test_postgres_empty_target(self): - self.run_dbt(['debug', '--target', 'none_target'], expect_pass=False) - self.assertGotValue(re.compile(r"\s+output 'none_target'"), 'misconfigured') - - -class TestDebugProfileVariable(TestDebug): - @property - def project_config(self): - return { - 'config-version': 2, - 'profile': '{{ "te" ~ "st" }}' - } - - -class TestDebugInvalidProject(DBTIntegrationTest): - @property - def schema(self): - return 'dbt_debug_049' - - @staticmethod - def dir(value): - return os.path.normpath(value) - - @property - def models(self): - return self.dir('models') - - @pytest.fixture(autouse=True) - def capsys(self, capsys): - self.capsys = capsys - - @use_profile('postgres') - def test_postgres_empty_project(self): - with open('dbt_project.yml', 'w') as f: - pass - self.run_dbt(['debug', '--profile', 'test'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_badproject(self): - # load a special project that is an error - self.use_default_project(overrides={ - 'invalid-key': 'not a valid key so this is bad project', - }) - self.run_dbt(['debug', '--profile', 'test'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_not_found_project_dir(self): - self.run_dbt(['debug', '--project-dir', 'nopass'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if 
line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR not found', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_invalid_project_outside_current_dir(self): - # create a dbt_project.yml - project_config = { - 'invalid-key': 'not a valid key in this project' - } - os.makedirs('custom', exist_ok=True) - with open("custom/dbt_project.yml", 'w') as f: - yaml.safe_dump(project_config, f, default_flow_style=True) - self.run_dbt(['debug', '--project-dir', 'custom'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) diff --git a/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql b/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql deleted file mode 100644 index 30f1a53ec18..00000000000 --- a/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql +++ /dev/null @@ -1 +0,0 @@ -select 1, diff --git a/test/integration/061_use_colors_tests/test_no_use_colors.py b/test/integration/061_use_colors_tests/test_no_use_colors.py deleted file mode 100644 index a923c8d855e..00000000000 --- a/test/integration/061_use_colors_tests/test_no_use_colors.py +++ /dev/null @@ -1,29 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import logging -import re -import sys - -class TestNoUseColors(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def schema(self): - return "use_colors_tests_061" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_no_use_colors(self): - # pattern to match formatted log output - pattern = re.compile(r'\[31m.*|\[33m.*') - - results, stdout = self.run_dbt_and_capture(args=['--no-use-colors', 'run'], expect_pass=False) - - stdout_contains_formatting_characters = bool(pattern.search(stdout)) - self.assertFalse(stdout_contains_formatting_characters) diff --git a/test/integration/061_use_colors_tests/test_use_colors.py b/test/integration/061_use_colors_tests/test_use_colors.py deleted file mode 100644 index 6b3dac6a1f1..00000000000 --- a/test/integration/061_use_colors_tests/test_use_colors.py +++ /dev/null @@ -1,29 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import logging -import re -import sys - -class TestUseColors(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def schema(self): - return "use_colors_tests_061" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_use_colors(self): - # pattern to match formatted log output - pattern = re.compile(r'\[31m.*|\[33m.*') - - results, stdout = self.run_dbt_and_capture(args=['--use-colors', 'run'], expect_pass=False) - - stdout_contains_formatting_characters = bool(pattern.search(stdout)) - self.assertTrue(stdout_contains_formatting_characters) diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py index 56004a1f28c..058e43ef05f 100644 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ b/test/integration/062_defer_state_tests/test_defer_state.py @@ -89,6 +89,9 @@ def run_and_snapshot_defer(self): # defer test, it succeeds results = self.run_dbt(['snapshot', '--state', 'state', 
'--defer']) + # favor_state test, it succeeds + results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state']) + def run_and_defer(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -123,6 +126,40 @@ def run_and_defer(self): assert len(results) == 1 + def run_and_defer_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + assert not any(r.node.deferred for r in results) + results = self.run_dbt(['run']) + assert len(results) == 2 + assert not any(r.node.deferred for r in results) + results = self.run_dbt(['test']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + # test tests first, because run will change things + # no state, wrong schema, failure. + self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) + + # no state, run also fails + self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) + + # defer test, it succeeds + results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + + # with state it should work though + results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + + with open('target/manifest.json') as fp: + data = json.load(fp) + assert data['nodes']['seed.test.seed']['deferred'] + + assert len(results) == 1 + def run_switchdirs_defer(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -152,6 +189,35 @@ def run_switchdirs_defer(self): expect_pass=False, ) + def run_switchdirs_defer_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + self.use_default_project({'model-paths': ['changed_models']}) + # the sql here is just wrong, so it should fail + self.run_dbt( + ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=False, + ) + # but this should work since we just use the old happy model + self.run_dbt( + ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=True, + ) + + self.use_default_project({'model-paths': ['changed_models_bad']}) + # this should fail because the table model refs a broken ephemeral + # model, which it should see + self.run_dbt( + ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=False, + ) + def run_defer_iff_not_exists(self): results = self.run_dbt(['seed', '--target', 'otherschema']) assert len(results) == 1 @@ -169,6 +235,23 @@ def run_defer_iff_not_exists(self): assert self.other_schema not in results[0].node.compiled_code assert self.unique_schema() in results[0].node.compiled_code + def run_defer_iff_not_exists_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + assert len(results) == 2 + 
+ # because the seed exists in other schema, we should defer it + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + def run_defer_deleted_upstream(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -191,6 +274,27 @@ def run_defer_deleted_upstream(self): assert self.other_schema not in results[0].node.compiled_code assert self.unique_schema() in results[0].node.compiled_code + def run_defer_deleted_upstream_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + self.use_default_project({'model-paths': ['changed_models_missing']}) + + self.run_dbt( + ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=True, + ) + + # despite deferral, test should use models just created in our schema + results = self.run_dbt(['test', '--state', 'state', '--defer', '--favor-state']) + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + @use_profile('postgres') def test_postgres_state_changetarget(self): self.run_and_defer() @@ -199,18 +303,38 @@ def test_postgres_state_changetarget(self): with pytest.raises(SystemExit): self.run_dbt(['seed', '--defer']) + @use_profile('postgres') + def test_postgres_state_changetarget_favor_state(self): + self.run_and_defer_favor_state() + + # make sure these commands don't work with --defer + with pytest.raises(SystemExit): + self.run_dbt(['seed', '--defer']) + @use_profile('postgres') def test_postgres_state_changedir(self): self.run_switchdirs_defer() + @use_profile('postgres') + def test_postgres_state_changedir_favor_state(self): + self.run_switchdirs_defer_favor_state() + @use_profile('postgres') def test_postgres_state_defer_iffnotexists(self): self.run_defer_iff_not_exists() + @use_profile('postgres') + def test_postgres_state_defer_iffnotexists_favor_state(self): + self.run_defer_iff_not_exists_favor_state() + @use_profile('postgres') def test_postgres_state_defer_deleted_upstream(self): self.run_defer_deleted_upstream() + @use_profile('postgres') + def test_postgres_state_defer_deleted_upstream_favor_state(self): + self.run_defer_deleted_upstream_favor_state() + @use_profile('postgres') def test_postgres_state_snapshot_defer(self): self.run_and_snapshot_defer() diff --git a/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql b/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql deleted file mode 100644 index 0f6028e5306..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql +++ /dev/null @@ -1,9 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "unique_key": "col_A", - "materialized": "incremental" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql b/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - 
config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql b/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql b/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/seeds/seed.csv b/test/integration/063_relation_name_tests/seeds/seed.csv deleted file mode 100644 index d4a1e26eed2..00000000000 --- a/test/integration/063_relation_name_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -col_A,col_B -1,2 -3,4 -5,6 diff --git a/test/integration/063_relation_name_tests/test_relation_name.py b/test/integration/063_relation_name_tests/test_relation_name.py deleted file mode 100644 index df81b57f69b..00000000000 --- a/test/integration/063_relation_name_tests/test_relation_name.py +++ /dev/null @@ -1,74 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -from pytest import mark - - -class TestAdapterDDL(DBTIntegrationTest): - def setUp(self): - DBTIntegrationTest.setUp(self) - self.run_dbt(["seed"]) - - @property - def schema(self): - return "adapter_ddl_063" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "seeds": { - "quote_columns": False, - }, - } - - # 63 characters is the character limit for a table name in a postgres database - # (assuming compiled without changes from source) - @use_profile("postgres") - def test_postgres_name_longer_than_63_fails(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012", - ], - expect_pass=False, - ) - - @mark.skip( - reason="Backup table generation currently adds 12 characters to the relation name, meaning the current name limit is 51." 
- ) - @use_profile("postgres") - def test_postgres_name_shorter_or_equal_to_63_passes(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" - "my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901", - ], - expect_pass=True, - ) - - @use_profile("postgres") - def test_postgres_long_name_passes_when_temp_tables_are_generated(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_51_characters_incremental_abcdefghijklmn", - ], - expect_pass=True, - ) - - # Run again to trigger incremental materialization - self.run_dbt( - [ - "run", - "-m", - "my_name_is_51_characters_incremental_abcdefghijklmn", - ], - expect_pass=True, - ) diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql deleted file mode 100644 index 10f41526abd..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': 'column_a, column_b'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql deleted file mode 100644 index 824ca36595f..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a'], 'type': 'non_existent_type'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql deleted file mode 100644 index ca0113272ea..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a'], 'unique': 'yes'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql b/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql deleted file mode 100644 index 9b47943e6cf..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'unique': True}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models/incremental.sql b/test/integration/065_postgres_index_tests/models/incremental.sql deleted file mode 100644 index 7cd24bdcf8c..00000000000 --- a/test/integration/065_postgres_index_tests/models/incremental.sql +++ /dev/null @@ -1,18 +0,0 @@ -{{ - config( - materialized = "incremental", - indexes=[ - {'columns': ['column_a'], 'type': 'hash'}, - {'columns': ['column_a', 'column_b'], 'unique': True}, - ] - ) -}} - -select * -from ( - select 1 as column_a, 2 as column_b -) t - -{% if is_incremental() %} - where column_a > (select max(column_a) from {{this}}) -{% endif %} diff --git a/test/integration/065_postgres_index_tests/models/table.sql b/test/integration/065_postgres_index_tests/models/table.sql deleted file mode 100644 index 39fccc14b15..00000000000 --- a/test/integration/065_postgres_index_tests/models/table.sql +++ /dev/null @@ 
-1,14 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a']}, - {'columns': ['column_b']}, - {'columns': ['column_a', 'column_b']}, - {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True}, - {'columns': ['column_a'], 'type': 'hash'} - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/seeds/seed.csv b/test/integration/065_postgres_index_tests/seeds/seed.csv deleted file mode 100644 index e744edef675..00000000000 --- a/test/integration/065_postgres_index_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -country_code,country_name -US,United States -CA,Canada -GB,United Kingdom diff --git a/test/integration/065_postgres_index_tests/snapshots/colors.sql b/test/integration/065_postgres_index_tests/snapshots/colors.sql deleted file mode 100644 index f3a901d615f..00000000000 --- a/test/integration/065_postgres_index_tests/snapshots/colors.sql +++ /dev/null @@ -1,29 +0,0 @@ -{% snapshot colors %} - - {{ - config( - target_database=database, - target_schema=schema, - unique_key='id', - strategy='check', - check_cols=['color'], - indexes=[ - {'columns': ['id'], 'type': 'hash'}, - {'columns': ['id', 'color'], 'unique': True}, - ] - ) - }} - - {% if var('version') == 1 %} - - select 1 as id, 'red' as color union all - select 2 as id, 'green' as color - - {% else %} - - select 1 as id, 'blue' as color union all - select 2 as id, 'green' as color - - {% endif %} - -{% endsnapshot %} diff --git a/test/integration/065_postgres_index_tests/test_postgres_indexes.py b/test/integration/065_postgres_index_tests/test_postgres_indexes.py deleted file mode 100644 index 56dc557d5ac..00000000000 --- a/test/integration/065_postgres_index_tests/test_postgres_indexes.py +++ /dev/null @@ -1,134 +0,0 @@ -import re - -from test.integration.base import DBTIntegrationTest, use_profile - - -INDEX_DEFINITION_PATTERN = re.compile(r'using\s+(\w+)\s+\((.+)\)\Z') - -class TestPostgresIndex(DBTIntegrationTest): - @property - def schema(self): - return "postgres_index_065" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - 'indexes': [ - {'columns': ['country_code'], 'unique': False, 'type': 'hash'}, - {'columns': ['country_code', 'country_name'], 'unique': True}, - ], - }, - 'vars': { - 'version': 1 - }, - } - - @use_profile('postgres') - def test__postgres__table(self): - results = self.run_dbt(['run', '--models', 'table']) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('table') - self.assertCountEqual( - indexes, - [ - {'columns': 'column_a', 'unique': False, 'type': 'btree'}, - {'columns': 'column_b', 'unique': False, 'type': 'btree'}, - {'columns': 'column_a, column_b', 'unique': False, 'type': 'btree'}, - {'columns': 'column_b, column_a', 'unique': True, 'type': 'btree'}, - {'columns': 'column_a', 'unique': False, 'type': 'hash'} - ] - ) - - @use_profile('postgres') - def test__postgres__incremental(self): - for additional_argument in [[], [], ['--full-refresh']]: - results = self.run_dbt(['run', '--models', 'incremental'] + additional_argument) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('incremental') - self.assertCountEqual( - indexes, - [ - {'columns': 'column_a', 'unique': False, 'type': 'hash'}, - {'columns': 'column_a, column_b', 'unique': True, 'type': 'btree'}, - ] - ) - - @use_profile('postgres') - def test__postgres__seed(self): - for 
additional_argument in [[], [], ['--full-refresh']]: - results = self.run_dbt(["seed"] + additional_argument) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('seed') - self.assertCountEqual( - indexes, - [ - {'columns': 'country_code', 'unique': False, 'type': 'hash'}, - {'columns': 'country_code, country_name', 'unique': True, 'type': 'btree'}, - ] - ) - - @use_profile('postgres') - def test__postgres__snapshot(self): - for version in [1, 2]: - results = self.run_dbt(["snapshot", '--vars', 'version: {}'.format(version)]) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('colors') - self.assertCountEqual( - indexes, - [ - {'columns': 'id', 'unique': False, 'type': 'hash'}, - {'columns': 'id, color', 'unique': True, 'type': 'btree'}, - ] - ) - - def get_indexes(self, table_name): - sql = """ - SELECT - pg_get_indexdef(idx.indexrelid) as index_definition - FROM pg_index idx - JOIN pg_class tab ON tab.oid = idx.indrelid - WHERE - tab.relname = '{table}' - AND tab.relnamespace = ( - SELECT oid FROM pg_namespace WHERE nspname = '{schema}' - ); - """ - - sql = sql.format(table=table_name, schema=self.unique_schema()) - results = self.run_sql(sql, fetch='all') - return [self.parse_index_definition(row[0]) for row in results] - - def parse_index_definition(self, index_definition): - index_definition = index_definition.lower() - is_unique = 'unique' in index_definition - m = INDEX_DEFINITION_PATTERN.search(index_definition) - return {'columns': m.group(2), 'unique': is_unique, 'type': m.group(1)} - -class TestPostgresInvalidIndex(DBTIntegrationTest): - @property - def schema(self): - return "postgres_index_065" - - @property - def models(self): - return "models-invalid" - - @use_profile('postgres') - def test__postgres__invalid_index_configs(self): - results, output = self.run_dbt_and_capture(expect_pass=False) - self.assertEqual(len(results), 4) - self.assertRegex(output, r'columns.*is not of type \'array\'') - self.assertRegex(output, r'unique.*is not of type \'boolean\'') - self.assertRegex(output, r'\'columns\' is a required property') - self.assertRegex(output, r'Database Error in model invalid_type') diff --git a/test/integration/067_store_test_failures_tests/models/fine_model.sql b/test/integration/067_store_test_failures_tests/models/fine_model.sql deleted file mode 100644 index 94b923a17c2..00000000000 --- a/test/integration/067_store_test_failures_tests/models/fine_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('people') }} diff --git a/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql b/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql deleted file mode 100644 index 97536ffaf06..00000000000 --- a/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as quite_long_column_name diff --git a/test/integration/067_store_test_failures_tests/models/problematic_model.sql b/test/integration/067_store_test_failures_tests/models/problematic_model.sql deleted file mode 100644 index e780d6b001e..00000000000 --- a/test/integration/067_store_test_failures_tests/models/problematic_model.sql +++ /dev/null @@ -1,11 +0,0 @@ -select * from {{ ref('people') }} - -union all - -select * from {{ ref('people') }} -where id in (1,2) - -union all - -select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }} -where id in (3,4) diff --git 
a/test/integration/067_store_test_failures_tests/models/schema.yml b/test/integration/067_store_test_failures_tests/models/schema.yml deleted file mode 100644 index f01a9e350d8..00000000000 --- a/test/integration/067_store_test_failures_tests/models/schema.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: 2 - -models: - - - name: fine_model - columns: - - name: id - tests: - - unique - - not_null - - - name: problematic_model - columns: - - name: id - tests: - - unique: - store_failures: true - - not_null - - name: first_name - tests: - # test truncation of really long test name - - accepted_values: - values: - - Jack - - Kathryn - - Gerald - - Bonnie - - Harold - - Jacqueline - - Wanda - - Craig - # - Gary - # - Rose - - - name: fine_model_but_with_a_no_good_very_long_name - columns: - - name: quite_long_column_name - tests: - # test truncation of really long test name with builtin - - unique diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv deleted file mode 100644 index 02f28435b46..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv +++ /dev/null @@ -1,3 +0,0 @@ -value_field,n_records -Gary,1 -Rose,1 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv deleted file mode 100644 index d9e7257f122..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 -2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 -3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 -5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 -6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 -7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 -8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 -9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 -10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv deleted file mode 100644 index 95fef8a2594..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv deleted file mode 100644 index 431d54ef8d0..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv +++ /dev/null @@ -1,3 +0,0 @@ -unique_field,n_records -2,2 -1,2 \ No newline at end of file diff --git a/test/integration/067_store_test_failures_tests/seeds/people.csv b/test/integration/067_store_test_failures_tests/seeds/people.csv deleted file mode 100644 index 
d9e7257f122..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/people.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 -2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 -3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 -5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 -6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 -7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 -8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 -9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 -10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 diff --git a/test/integration/067_store_test_failures_tests/test_store_test_failures.py b/test/integration/067_store_test_failures_tests/test_store_test_failures.py deleted file mode 100644 index b0ba0875128..00000000000 --- a/test/integration/067_store_test_failures_tests/test_store_test_failures.py +++ /dev/null @@ -1,91 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestStoreTestFailures(DBTIntegrationTest): - @property - def schema(self): - return "test_store_test_failures_067" - - def tearDown(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - with self.adapter.connection_named('__test'): - self._drop_schema_named(self.default_database, test_audit_schema) - - super().tearDown() - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "test-paths": ["tests"], - "seeds": { - "quote_columns": False, - "test": { - "expected": self.column_type_overrides() - }, - }, - } - - def column_type_overrides(self): - return {} - - def run_tests_store_one_failure(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - self.run_dbt(["test"], expect_pass=False) - - # one test is configured with store_failures: true, make sure it worked - self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema) - - def run_tests_store_failures_and_assert(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - # make sure this works idempotently for all tests - self.run_dbt(["test", "--store-failures"], expect_pass=False) - results = self.run_dbt(["test", "--store-failures"], expect_pass=False) - - # compare test results - actual = [(r.status, r.failures) for r in results] - expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), - ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] - self.assertEqual(sorted(actual), sorted(expected)) - - # compare test results stored in database - self.assertTablesEqual("failing_test", "expected_failing_test", test_audit_schema) - self.assertTablesEqual("not_null_problematic_model_id", "expected_not_null_problematic_model_id", test_audit_schema) - self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema) - self.assertTablesEqual("accepted_values_problematic_mo_c533ab4ca65c1a9dbf14f79ded49b628", "expected_accepted_values", test_audit_schema) - - -class PostgresTestStoreTestFailures(TestStoreTestFailures): - - @property - def schema(self): - return "067" # otherwise too long + truncated - - def column_type_overrides(self): - return { - "expected_unique_problematic_model_id": { - 
"+column_types": { - "n_records": "bigint", - }, - }, - "expected_accepted_values": { - "+column_types": { - "n_records": "bigint", - }, - }, - } - - @use_profile('postgres') - def test__postgres__store_and_assert(self): - self.run_tests_store_one_failure() - self.run_tests_store_failures_and_assert() diff --git a/test/integration/067_store_test_failures_tests/tests/failing_test.sql b/test/integration/067_store_test_failures_tests/tests/failing_test.sql deleted file mode 100644 index 1bb5ae5ba6e..00000000000 --- a/test/integration/067_store_test_failures_tests/tests/failing_test.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('fine_model') }} diff --git a/test/integration/067_store_test_failures_tests/tests/passing_test.sql b/test/integration/067_store_test_failures_tests/tests/passing_test.sql deleted file mode 100644 index 15c9a7a642d..00000000000 --- a/test/integration/067_store_test_failures_tests/tests/passing_test.sql +++ /dev/null @@ -1,2 +0,0 @@ -select * from {{ ref('fine_model') }} -where false diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql deleted file mode 100644 index f9eebdcb852..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql +++ /dev/null @@ -1,29 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='append_new_columns' - ) -}} - -{% set string_type = 'varchar(10)' %} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2, - cast(field3 as {{string_type}}) as field3, - cast(field4 as {{string_type}}) as field4 -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 -FROM source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql deleted file mode 100644 index dbb4962a7e5..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql +++ /dev/null @@ -1,28 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='append_new_columns' - ) -}} - -{% set string_type = 'varchar(10)' %} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field3 as {{string_type}}) as field3, - cast(field4 as {{string_type}}) as field4 -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 -FROM source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql deleted file mode 100644 index f3a279f0285..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql +++ /dev/null @@ 
-1,19 +0,0 @@ -{{ - config(materialized='table') -}} - -{% set string_type = 'varchar(10)' %} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id, - cast(field1 as {{string_type}}) as field1, - cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2, - cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3, - cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql deleted file mode 100644 index 5ff759d7dab..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config(materialized='table') -}} - -{% set string_type = 'varchar(10)' %} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id - ,cast(field1 as {{string_type}}) as field1 - ,cast(field2 as {{string_type}}) as field2 - ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3 - ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_fail.sql b/test/integration/070_incremental_schema_tests/models/incremental_fail.sql deleted file mode 100644 index 590f5b56d97..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_fail.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='fail' - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, field1, field2 FROM source_data - -{% else %} - -SELECT id, field1, field3 FROm source_data - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql b/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql deleted file mode 100644 index 51dee6022fb..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='ignore' - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, field1, field2 FROM source_data LIMIT 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql deleted file mode 100644 index 92d4564e0e8..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql +++ /dev/null @@ -1,15 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id - ,field1 - ,field2 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql deleted file mode 100644 index 
b742c970419..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql +++ /dev/null @@ -1,31 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='sync_all_columns' - - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% set string_type = 'varchar(10)' %} - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field3 as {{string_type}}) as field3, -- to validate new fields - cast(field4 as {{string_type}}) AS field4 -- to validate new fields - -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -select id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 - -from source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql deleted file mode 100644 index 6cdbaba5c0d..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql +++ /dev/null @@ -1,20 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -{% set string_type = 'varchar(10)' %} - -select id - ,cast(field1 as {{string_type}}) as field1 - --,field2 - ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3 - ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4 - -from source_data -order by id \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql deleted file mode 100644 index 55bae0ad17e..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql +++ /dev/null @@ -1,29 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='sync_all_columns' - - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% set string_type = 'varchar(10)' %} - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1 - -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -select id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 - -from source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql deleted file mode 100644 index ff88512c6f5..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql +++ /dev/null @@ -1,17 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -{% set string_type = 'varchar(10)' %} - -select id - ,cast(field1 as {{string_type}}) as field1 - -from source_data -order by id \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/model_a.sql b/test/integration/070_incremental_schema_tests/models/model_a.sql deleted file mode 100644 index 2a0b2ddaff2..00000000000 --- a/test/integration/070_incremental_schema_tests/models/model_a.sql +++ /dev/null @@ -1,22 
+0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4 - union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4 - union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4 - union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4 - union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 as field3, 'XXX' as field4 - union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4 - -) - -select id - ,field1 - ,field2 - ,field3 - ,field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/schema.yml b/test/integration/070_incremental_schema_tests/models/schema.yml deleted file mode 100644 index 5546314e413..00000000000 --- a/test/integration/070_incremental_schema_tests/models/schema.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: 2 - -models: - - name: model_a - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns_target - columns: - - name: id - tags: [column_leveL_tag] - tests: - - unique - - - \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/test_incremental_schema.py b/test/integration/070_incremental_schema_tests/test_incremental_schema.py deleted file mode 100644 index 09a494b8952..00000000000 --- a/test/integration/070_incremental_schema_tests/test_incremental_schema.py +++ /dev/null @@ -1,88 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestIncrementalSchemaChange(DBTIntegrationTest): - @property - def schema(self): - return "test_incremental_schema_070" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "test-paths": ["tests"] - } - - def run_twice_and_assert( - self, include, compare_source, compare_target - ): - - # dbt run (twice) - run_args = ['run'] - if include: - run_args.extend(('--models', include)) - results_one = self.run_dbt(run_args) - results_two = self.run_dbt(run_args) - - self.assertEqual(len(results_one), 3) - self.assertEqual(len(results_two), 3) - - self.assertTablesEqual(compare_source, compare_target) - - def run_incremental_ignore(self): - select = 'model_a incremental_ignore incremental_ignore_target' - compare_source = 'incremental_ignore' - compare_target = 'incremental_ignore_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_append_new_columns(self): - select = 'model_a incremental_append_new_columns incremental_append_new_columns_target' - compare_source = 'incremental_append_new_columns' - compare_target = 'incremental_append_new_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def 
-    def run_incremental_append_new_columns_remove_one(self):
-        select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target'
-        compare_source = 'incremental_append_new_columns_remove_one'
-        compare_target = 'incremental_append_new_columns_remove_one_target'
-        self.run_twice_and_assert(select, compare_source, compare_target)
-
-    def run_incremental_sync_all_columns(self):
-        select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target'
-        compare_source = 'incremental_sync_all_columns'
-        compare_target = 'incremental_sync_all_columns_target'
-        self.run_twice_and_assert(select, compare_source, compare_target)
-
-    def run_incremental_sync_remove_only(self):
-        select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target'
-        compare_source = 'incremental_sync_remove_only'
-        compare_target = 'incremental_sync_remove_only_target'
-        self.run_twice_and_assert(select, compare_source, compare_target)
-
-    def run_incremental_fail_on_schema_change(self):
-        select = 'model_a incremental_fail'
-        results_one = self.run_dbt(['run', '--models', select, '--full-refresh'])
-        results_two = self.run_dbt(['run', '--models', select], expect_pass = False)
-        self.assertIn('Compilation Error', results_two[1].message)
-
-    @use_profile('postgres')
-    def test__postgres__run_incremental_ignore(self):
-        self.run_incremental_ignore()
-
-    @use_profile('postgres')
-    def test__postgres__run_incremental_append_new_columns(self):
-        self.run_incremental_append_new_columns()
-        self.run_incremental_append_new_columns_remove_one()
-
-    @use_profile('postgres')
-    def test__postgres__run_incremental_sync_all_columns(self):
-        self.run_incremental_sync_all_columns()
-        self.run_incremental_sync_remove_only()
-
-    @use_profile('postgres')
-    def test__postgres__run_incremental_fail_on_schema_change(self):
-        self.run_incremental_fail_on_schema_change()
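This unittest-style class is being retired along with the rest of test/integration, but the run-twice-and-compare flow carries over to the pytest-based functional framework. A minimal sketch of the equivalent, assuming the dbt.tests.util helpers and the project fixture used by the new framework:

    from dbt.tests.util import run_dbt  # assumed helper from the functional framework

    def test_incremental_sync_all_columns(project):  # hypothetical replacement test
        args = ['run', '--models',
                'model_a incremental_sync_all_columns incremental_sync_all_columns_target']
        run_dbt(args)             # first run builds all three models
        results = run_dbt(args)   # second run exercises on_schema_change
        assert len(results) == 3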
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_a.sql b/test/integration/070_incremental_schema_tests/tests/select_from_a.sql
deleted file mode 100644
index 3dc8f2857bd..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_a.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('model_a') }} where false
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql
deleted file mode 100644
index 947e8458854..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('incremental_append_new_columns') }} where false
\ No newline at end of file
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql
deleted file mode 100644
index 8b86eddd71d..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('incremental_append_new_columns_target') }} where false
\ No newline at end of file
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql
deleted file mode 100644
index d565c846465..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('incremental_ignore') }} where false
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql
deleted file mode 100644
index 35d535c5ca5..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('incremental_ignore_target') }} where false
\ No newline at end of file
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql
deleted file mode 100644
index aedc9f80396..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('incremental_sync_all_columns') }} where false
\ No newline at end of file
diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql
deleted file mode 100644
index 4b703c988bf..00000000000
--- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('incremental_sync_all_columns_target') }} where false
\ No newline at end of file
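Each of these one-line singular tests exists only to prove that the fixture compiles and its ref() resolves; `where false` guarantees zero rows, so the test always passes without scanning data. In the functional-test layout the same fixture becomes a Python string constant, e.g. (hypothetical module and names):

    # hypothetical fixtures.py for a ported functional test
    select_from_a_sql = "select * from {{ ref('model_a') }} where false"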
diff --git a/test/integration/base.py b/test/integration/base.py
index 8b06782a334..602be18525c 100644
--- a/test/integration/base.py
+++ b/test/integration/base.py
@@ -1,6 +1,6 @@
+from io import StringIO
 import json
 import os
-import io
 import random
 import shutil
 import sys
@@ -26,7 +26,7 @@
 from dbt.context import providers
 from dbt.logger import log_manager
 from dbt.events.functions import (
-    capture_stdout_logs, fire_event, setup_event_logger, stop_capture_stdout_logs
+    capture_stdout_logs, fire_event, setup_event_logger, cleanup_event_logger, stop_capture_stdout_logs
 )
 from dbt.events.test_types import (
     IntegrationTestInfo,
@@ -313,7 +313,7 @@ def setUp(self):
         os.chdir(self.initial_dir)
         # before we go anywhere, collect the initial path info
         self._logs_dir = os.path.join(self.initial_dir, 'logs', self.prefix)
-        setup_event_logger(self._logs_dir, None, False, True)
+        setup_event_logger(self._logs_dir, '', False, True)
         _really_makedirs(self._logs_dir)
         self.test_original_source_path = _pytest_get_test_root()
         self.test_root_dir = self._generate_test_root_dir()
@@ -440,6 +440,8 @@ def tearDown(self):
         except EnvironmentError:
             msg = f"Could not clean up after test - {self.test_root_dir} not removable"
             fire_event(IntegrationTestException(msg=msg))
+
+        cleanup_event_logger()
 
     def _get_schema_fqn(self, database, schema):
         schema_fqn = self.quote_as_configured(schema, 'schema')
@@ -524,7 +526,8 @@ def run_dbt(self, args=None, expect_pass=True, profiles_dir=True):
 
     def run_dbt_and_capture(self, *args, **kwargs):
        try:
-            stringbuf = capture_stdout_logs()
+            stringbuf = StringIO()
+            capture_stdout_logs(stringbuf)
             res = self.run_dbt(*args, **kwargs)
 
             stdout = stringbuf.getvalue()
@@ -548,8 +551,8 @@ def run_dbt_and_check(self, args=None, profiles_dir=True):
         if profiles_dir:
             final_args.extend(['--profiles-dir', self.test_root_dir])
         final_args.append('--log-cache-events')
-        msg = f"Invoking dbt with {final_args}"
-        fire_event(IntegrationTestInfo(msg=msg))
+        # msg = f"Invoking dbt with {final_args}"
+        # fire_event(IntegrationTestInfo(msg=msg))
         return dbt.handle_and_check(final_args)
 
     def run_sql_file(self, path, kwargs=None):
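capture_stdout_logs no longer allocates the buffer itself; the caller owns it, so a test can still read whatever output was produced when run_dbt raises mid-stream. The new call pattern, as used in run_dbt_and_capture above (the finally placement is a sketch):

    from io import StringIO

    stringbuf = StringIO()
    capture_stdout_logs(stringbuf)       # events are mirrored into the caller's buffer
    try:
        res = self.run_dbt(args)         # any dbt invocation
    finally:
        stdout = stringbuf.getvalue()    # readable even if run_dbt raised
        stop_capture_stdout_logs()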
diff --git a/test/unit/test_compiler.py b/test/unit/test_compiler.py
index 506c427a067..649a5918f91 100644
--- a/test/unit/test_compiler.py
+++ b/test/unit/test_compiler.py
@@ -6,8 +6,7 @@
 from dbt.adapters.postgres import Plugin
 from dbt.contracts.files import FileHash
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import NodeConfig, DependsOn, ParsedModelNode
-from dbt.contracts.graph.compiled import CompiledModelNode, InjectedCTE
+from dbt.contracts.graph.nodes import NodeConfig, DependsOn, ModelNode, InjectedCTE
 from dbt.node_types import NodeType
 
 from datetime import datetime
@@ -86,7 +85,7 @@ def test__prepend_ctes__already_has_cte(self):
         manifest = Manifest(
             macros={},
             nodes={
-                'model.root.view': ParsedModelNode(
+                'model.root.view': ModelNode(
                     name='view',
                     database='dbt',
                     schema='analytics',
@@ -95,7 +94,6 @@ def test__prepend_ctes__already_has_cte(self):
                     unique_id='model.root.view',
                     fqn=['root', 'view'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=self.model_config,
                     path='view.sql',
                     original_file_path='view.sql',
@@ -103,7 +101,7 @@ def test__prepend_ctes__already_has_cte(self):
                     raw_code='with cte as (select * from something_else) select * from {{ref("ephemeral")}}',
                     checksum=FileHash.from_contents(''),
                 ),
-                'model.root.ephemeral': ParsedModelNode(
+                'model.root.ephemeral': ModelNode(
                     name='ephemeral',
                     database='dbt',
                     schema='analytics',
@@ -112,7 +110,6 @@ def test__prepend_ctes__already_has_cte(self):
                     unique_id='model.root.ephemeral',
                     fqn=['root', 'ephemeral'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=ephemeral_config,
                     path='ephemeral.sql',
                     original_file_path='ephemeral.sql',
@@ -150,7 +147,7 @@ def test__prepend_ctes__no_ctes(self):
         manifest = Manifest(
             macros={},
             nodes={
-                'model.root.view': ParsedModelNode(
+                'model.root.view': ModelNode(
                     name='view',
                     database='dbt',
                     schema='analytics',
@@ -159,7 +156,6 @@ def test__prepend_ctes__no_ctes(self):
                     unique_id='model.root.view',
                     fqn=['root', 'view'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=self.model_config,
                     path='view.sql',
                     original_file_path='view.sql',
@@ -168,7 +164,7 @@ def test__prepend_ctes__no_ctes(self):
                               'select * from source_table'),
                     checksum=FileHash.from_contents(''),
                 ),
-                'model.root.view_no_cte': ParsedModelNode(
+                'model.root.view_no_cte': ModelNode(
                     name='view_no_cte',
                     database='dbt',
                     schema='analytics',
@@ -177,7 +173,6 @@ def test__prepend_ctes__no_ctes(self):
                     unique_id='model.root.view_no_cte',
                     fqn=['root', 'view_no_cte'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=self.model_config,
                     path='view.sql',
                     original_file_path='view.sql',
@@ -228,7 +223,7 @@ def test__prepend_ctes(self):
         manifest = Manifest(
             macros={},
             nodes={
-                'model.root.view': ParsedModelNode(
+                'model.root.view': ModelNode(
                     name='view',
                     database='dbt',
                     schema='analytics',
@@ -237,7 +232,6 @@ def test__prepend_ctes(self):
                     unique_id='model.root.view',
                     fqn=['root', 'view'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=self.model_config,
                     path='view.sql',
                     original_file_path='view.sql',
@@ -245,7 +239,7 @@ def test__prepend_ctes(self):
                     raw_code='select * from {{ref("ephemeral")}}',
                     checksum=FileHash.from_contents(''),
                 ),
-                'model.root.ephemeral': ParsedModelNode(
+                'model.root.ephemeral': ModelNode(
                     name='ephemeral',
                     database='dbt',
                     schema='analytics',
@@ -254,7 +248,6 @@ def test__prepend_ctes(self):
                     unique_id='model.root.ephemeral',
                     fqn=['root', 'ephemeral'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=ephemeral_config,
                     path='ephemeral.sql',
                     original_file_path='ephemeral.sql',
@@ -290,7 +283,7 @@ def test__prepend_ctes(self):
 
     def test__prepend_ctes__cte_not_compiled(self):
         ephemeral_config = self.model_config.replace(materialized='ephemeral')
-        parsed_ephemeral = ParsedModelNode(
+        parsed_ephemeral = ModelNode(
             name='ephemeral',
             database='dbt',
             schema='analytics',
@@ -299,7 +292,6 @@ def test__prepend_ctes__cte_not_compiled(self):
             unique_id='model.root.ephemeral',
             fqn=['root', 'ephemeral'],
             package_name='root',
-            root_path='/usr/src/app',
             refs=[],
             sources=[],
             depends_on=DependsOn(),
@@ -311,7 +303,7 @@ def test__prepend_ctes__cte_not_compiled(self):
             raw_code='select * from source_table',
             checksum=FileHash.from_contents(''),
         )
-        compiled_ephemeral = CompiledModelNode(
+        compiled_ephemeral = ModelNode(
             name='ephemeral',
             database='dbt',
             schema='analytics',
@@ -320,7 +312,6 @@ def test__prepend_ctes__cte_not_compiled(self):
             unique_id='model.root.ephemeral',
             fqn=['root', 'ephemeral'],
             package_name='root',
-            root_path='/usr/src/app',
             refs=[],
             sources=[],
             depends_on=DependsOn(),
@@ -339,7 +330,7 @@ def test__prepend_ctes__cte_not_compiled(self):
         manifest = Manifest(
             macros={},
             nodes={
-                'model.root.view': CompiledModelNode(
+                'model.root.view': ModelNode(
                     name='view',
                     database='dbt',
                     schema='analytics',
@@ -348,7 +339,6 @@ def test__prepend_ctes__cte_not_compiled(self):
                     unique_id='model.root.view',
                     fqn=['root', 'view'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     refs=[],
                     sources=[],
                     depends_on=DependsOn(nodes=['model.root.ephemeral']),
@@ -409,7 +399,7 @@ def test__prepend_ctes__multiple_levels(self):
         manifest = Manifest(
             macros={},
             nodes={
-                'model.root.view': ParsedModelNode(
+                'model.root.view': ModelNode(
                     name='view',
                     database='dbt',
                     schema='analytics',
@@ -418,7 +408,6 @@ def test__prepend_ctes__multiple_levels(self):
                     unique_id='model.root.view',
                     fqn=['root', 'view'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=self.model_config,
                     path='view.sql',
                     original_file_path='view.sql',
@@ -427,7 +416,7 @@ def test__prepend_ctes__multiple_levels(self):
                     checksum=FileHash.from_contents(''),
                 ),
 
-                'model.root.ephemeral': ParsedModelNode(
+                'model.root.ephemeral': ModelNode(
                     name='ephemeral',
                     database='dbt',
                     schema='analytics',
@@ -436,7 +425,6 @@ def test__prepend_ctes__multiple_levels(self):
                     unique_id='model.root.ephemeral',
                     fqn=['root', 'ephemeral'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=ephemeral_config,
                     path='ephemeral.sql',
                     original_file_path='ephemeral.sql',
@@ -444,7 +432,7 @@ def test__prepend_ctes__multiple_levels(self):
                     raw_code='select * from {{ref("ephemeral_level_two")}}',
                     checksum=FileHash.from_contents(''),
                 ),
-                'model.root.ephemeral_level_two': ParsedModelNode(
+                'model.root.ephemeral_level_two': ModelNode(
                     name='ephemeral_level_two',
                     database='dbt',
                     schema='analytics',
@@ -453,7 +441,6 @@ def test__prepend_ctes__multiple_levels(self):
                     unique_id='model.root.ephemeral_level_two',
                     fqn=['root', 'ephemeral_level_two'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=ephemeral_config,
                     path='ephemeral_level_two.sql',
                     original_file_path='ephemeral_level_two.sql',
@@ -500,7 +487,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self):
         manifest = Manifest(
             macros={},
             nodes={
-                'model.root.view': ParsedModelNode(
+                'model.root.view': ModelNode(
                     name='view',
                     database='dbt',
                     schema='analytics',
@@ -509,7 +496,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self):
                     unique_id='model.root.view',
                     fqn=['root', 'view'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=self.model_config,
                     path='view.sql',
                     original_file_path='view.sql',
@@ -517,7 +503,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self):
                     raw_code='select * from {{ref("ephemeral")}}',
                     checksum=FileHash.from_contents(''),
                 ),
-                'model.root.inner_ephemeral': ParsedModelNode(
+                'model.root.inner_ephemeral': ModelNode(
                     name='inner_ephemeral',
                     database='dbt',
                     schema='analytics',
@@ -526,7 +512,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self):
                     unique_id='model.root.inner_ephemeral',
                     fqn=['root', 'inner_ephemeral'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=ephemeral_config,
                     path='inner_ephemeral.sql',
                     original_file_path='inner_ephemeral.sql',
@@ -534,7 +519,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self):
                     raw_code='select * from source_table',
                     checksum=FileHash.from_contents(''),
                 ),
-                'model.root.ephemeral': ParsedModelNode(
+                'model.root.ephemeral': ModelNode(
                     name='ephemeral',
                     database='dbt',
                     schema='analytics',
@@ -543,7 +528,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self):
                     unique_id='model.root.ephemeral',
                     fqn=['root', 'ephemeral'],
                     package_name='root',
-                    root_path='/usr/src/app',
                     config=ephemeral_config,
                     path='ephemeral.sql',
                     original_file_path='ephemeral.sql',
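The parsed/compiled split is gone: ParsedModelNode and CompiledModelNode collapse into a single ModelNode whose compiled-state fields are simply unset on a freshly parsed node, which is why both sides of these fixtures now construct the same class. A minimal construction sketch using only fields these fixtures pass (the exact required-field set may differ):

    from dbt.contracts.files import FileHash
    from dbt.contracts.graph.nodes import ModelNode, NodeConfig
    from dbt.node_types import NodeType

    node = ModelNode(
        name='view',
        resource_type=NodeType.Model,
        package_name='root',
        path='view.sql',
        original_file_path='view.sql',
        unique_id='model.root.view',
        fqn=['root', 'view'],
        database='dbt',
        schema='analytics',
        alias='view',
        language='sql',
        raw_code='select * from {{ref("ephemeral")}}',
        checksum=FileHash.from_contents(''),
        config=NodeConfig(),  # the fixtures pass self.model_config here
    )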
diff --git a/test/unit/test_config.py b/test/unit/test_config.py
index 697dc05a1bb..880a09cc7ad 100644
--- a/test/unit/test_config.py
+++ b/test/unit/test_config.py
@@ -1086,35 +1086,6 @@ def test_archive_not_allowed(self):
         with self.assertRaises(dbt.exceptions.DbtProjectError):
             self.get_project()
 
-    def test__no_unused_resource_config_paths(self):
-        self.default_project_data.update({
-            'models': model_config,
-            'seeds': {},
-        })
-        project = self.from_parts()
-
-        resource_fqns = {'models': model_fqns}
-        unused = project.get_unused_resource_config_paths(resource_fqns, [])
-        self.assertEqual(len(unused), 0)
-
-    def test__unused_resource_config_paths(self):
-        self.default_project_data.update({
-            'models': model_config['my_package_name'],
-            'seeds': {},
-        })
-        project = self.from_parts()
-
-        resource_fqns = {'models': model_fqns}
-        unused = project.get_unused_resource_config_paths(resource_fqns, [])
-        self.assertEqual(len(unused), 3)
-
-    def test__get_unused_resource_config_paths_empty(self):
-        project = self.from_parts()
-        unused = project.get_unused_resource_config_paths({'models': frozenset((
-            ('my_test_project', 'foo', 'bar'),
-            ('my_test_project', 'foo', 'baz'),
-        ))}, [])
-        self.assertEqual(len(unused), 0)
 
     def test__warn_for_unused_resource_config_paths_empty(self):
         project = self.from_parts()
@@ -1174,26 +1145,17 @@ def from_parts(self, exc=None):
         else:
             return err
 
-    def test__get_unused_resource_config_paths(self):
-        project = self.from_parts()
-        unused = project.get_unused_resource_config_paths(self.used, [])
-        self.assertEqual(len(unused), 1)
-        self.assertEqual(unused[0], ('models', 'my_test_project', 'baz'))
-
-    @mock.patch.object(dbt.config.runtime, 'warn_or_error')
-    def test__warn_for_unused_resource_config_paths(self, warn_or_error):
+    def test__warn_for_unused_resource_config_paths(self):
         project = self.from_parts()
-        project.warn_for_unused_resource_config_paths(self.used, [])
-        warn_or_error.assert_called_once()
-
-    def test__warn_for_unused_resource_config_paths_disabled(self):
-        project = self.from_parts()
-        unused = project.get_unused_resource_config_paths(
-            self.used,
-            frozenset([('my_test_project', 'baz')])
-        )
-
-        self.assertEqual(len(unused), 0)
+        with mock.patch('dbt.config.runtime.warn_or_error') as warn_or_error_patch:
+            project.warn_for_unused_resource_config_paths(self.used, [])
+            warn_or_error_patch.assert_called_once()
+            event = warn_or_error_patch.call_args[0][0]
+            assert event.info.name == 'UnusedResourceConfigPath'
+            msg = event.info.msg
+            expected_msg = "- models.my_test_project.baz"
+            assert expected_msg in msg
 
 
 class TestRuntimeConfigFiles(BaseFileTest):
diff --git a/test/unit/test_context.py b/test/unit/test_context.py
index 668d76cc525..a567e032f55 100644
--- a/test/unit/test_context.py
+++ b/test/unit/test_context.py
@@ -10,11 +10,11 @@
 from dbt.adapters import factory
 from dbt.adapters.base import AdapterConfig
 from dbt.clients.jinja import MacroStack
-from dbt.contracts.graph.parsed import (
-    ParsedModelNode,
+from dbt.contracts.graph.nodes import (
+    ModelNode,
     NodeConfig,
     DependsOn,
-    ParsedMacro,
+    Macro,
 )
 from dbt.config.project import VarProvider
 from dbt.context import base, target, configured, providers, docs, manifest, macros
@@ -33,7 +33,7 @@ class TestVar(unittest.TestCase):
     def setUp(self):
-        self.model = ParsedModelNode(
+        self.model = ModelNode(
            alias="model_one",
            name="model_one",
            database="dbt",
@@ -43,7 +43,6 @@ def setUp(self):
             fqn=["root", "model_one"],
             package_name="root",
             original_file_path="model_one.sql",
-            root_path="/usr/src/app",
             refs=[],
             sources=[],
             depends_on=DependsOn(),
@@ -202,6 +201,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str
         "flags",
         "print",
         "diff_of_two_dicts",
+        "local_md5"
     }
 )
@@ -273,7 +273,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str
 
 def model():
-    return ParsedModelNode(
+    return ModelNode(
         alias="model_one",
         name="model_one",
         database="dbt",
@@ -283,7 +283,6 @@ def model():
         fqn=["root", "model_one"],
         package_name="root",
         original_file_path="model_one.sql",
-        root_path="/usr/src/app",
         refs=[],
         sources=[],
         depends_on=DependsOn(),
@@ -316,7 +315,7 @@ def test_base_context():
 
 def mock_macro(name, package_name):
     macro = mock.MagicMock(
-        __class__=ParsedMacro,
+        __class__=Macro,
         package_name=package_name,
         resource_type="macro",
         unique_id=f"macro.{package_name}.{name}",
@@ -336,7 +335,7 @@ def mock_manifest(config):
 
 def mock_model():
     return mock.MagicMock(
-        __class__=ParsedModelNode,
+        __class__=ModelNode,
         alias="model_one",
         name="model_one",
         database="dbt",
@@ -346,7 +345,6 @@ def mock_model():
         fqn=["root", "model_one"],
         package_name="root",
         original_file_path="model_one.sql",
-        root_path="/usr/src/app",
         refs=[],
         sources=[],
         depends_on=DependsOn(),
@@ -432,7 +430,6 @@ def test_invocation_args_to_dict_in_macro_runtime_context(
     )
 
     # Comes from dbt/flags.py as they are the only values set that aren't None at default
-    assert ctx["invocation_args_dict"]["event_buffer_size"] == 100000
     assert ctx["invocation_args_dict"]["printer_width"] == 80
 
     # Comes from unit/utils.py config_from_parts_or_dicts method
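Setting __class__ on a MagicMock is what makes the isinstance() checks inside the context providers pass without constructing a real node, so mock_macro and mock_model only spell out the attributes the providers actually read. The pattern, as used above:

    import unittest.mock as mock
    from dbt.contracts.graph.nodes import Macro

    macro = mock.MagicMock(
        __class__=Macro,                       # isinstance(macro, Macro) -> True
        resource_type='macro',
        package_name='my_pkg',
        unique_id='macro.my_pkg.my_macro',
    )
    assert isinstance(macro, Macro)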
diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py
index aaa44857326..fe1e25d7925 100644
--- a/test/unit/test_contracts_graph_compiled.py
+++ b/test/unit/test_contracts_graph_compiled.py
@@ -2,10 +2,10 @@
 import pytest
 
 from dbt.contracts.files import FileHash
-from dbt.contracts.graph.compiled import (
-    CompiledModelNode, InjectedCTE, CompiledGenericTestNode
+from dbt.contracts.graph.nodes import (
+    ModelNode, InjectedCTE, GenericTestNode
 )
-from dbt.contracts.graph.parsed import (
+from dbt.contracts.graph.nodes import (
     DependsOn, NodeConfig, TestConfig, TestMetadata, ColumnInfo
 )
 from dbt.node_types import NodeType
@@ -22,9 +22,8 @@
 @pytest.fixture
 def basic_uncompiled_model():
-    return CompiledModelNode(
+    return ModelNode(
         package_name='test',
-        root_path='/root/',
         path='/root/models/foo.sql',
         original_file_path='models/foo.sql',
         language='sql',
@@ -55,9 +54,8 @@ def basic_uncompiled_model():
 
 @pytest.fixture
 def basic_compiled_model():
-    return CompiledModelNode(
+    return ModelNode(
         package_name='test',
-        root_path='/root/',
         path='/root/models/foo.sql',
         original_file_path='models/foo.sql',
         language='sql',
@@ -91,7 +89,6 @@ def basic_compiled_model():
 def minimal_uncompiled_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1,
         'resource_type': str(NodeType.Model),
         'path': '/root/models/foo.sql',
@@ -114,7 +111,6 @@ def minimal_uncompiled_dict():
 def basic_uncompiled_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1,
         'resource_type': str(NodeType.Model),
         'path': '/root/models/foo.sql',
@@ -164,7 +160,6 @@ def basic_uncompiled_dict():
 def basic_compiled_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1,
         'resource_type': str(NodeType.Model),
         'path': '/root/models/foo.sql',
@@ -215,19 +210,19 @@ def basic_compiled_dict():
 def test_basic_uncompiled_model(minimal_uncompiled_dict, basic_uncompiled_dict, basic_uncompiled_model):
     node_dict = basic_uncompiled_dict
     node = basic_uncompiled_model
-    assert_symmetric(node, node_dict, CompiledModelNode)
+    assert_symmetric(node, node_dict, ModelNode)
     assert node.empty is False
     assert node.is_refable is True
     assert node.is_ephemeral is False
 
-    assert_from_dict(node, minimal_uncompiled_dict, CompiledModelNode)
+    assert_from_dict(node, minimal_uncompiled_dict, ModelNode)
     pickle.loads(pickle.dumps(node))
 
 
 def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model):
     node_dict = basic_compiled_dict
     node = basic_compiled_model
-    assert_symmetric(node, node_dict, CompiledModelNode)
+    assert_symmetric(node, node_dict, ModelNode)
     assert node.empty is False
     assert node.is_refable is True
     assert node.is_ephemeral is False
@@ -236,13 +231,13 @@ def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model):
 def test_invalid_extra_fields_model(minimal_uncompiled_dict):
     bad_extra = minimal_uncompiled_dict
     bad_extra['notvalid'] = 'nope'
-    assert_fails_validation(bad_extra, CompiledModelNode)
+    assert_fails_validation(bad_extra, ModelNode)
 
 
 def test_invalid_bad_type_model(minimal_uncompiled_dict):
     bad_type = minimal_uncompiled_dict
     bad_type['resource_type'] = str(NodeType.Macro)
-    assert_fails_validation(bad_type, CompiledModelNode)
+    assert_fails_validation(bad_type, ModelNode)
 
 
 unchanged_compiled_models = [
@@ -328,7 +323,6 @@ def test_compare_changed_model(func, basic_uncompiled_model):
 def minimal_schema_test_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1,
         'resource_type': str(NodeType.Test),
         'path': '/root/x/path.sql',
@@ -352,9 +346,8 @@ def minimal_schema_test_dict():
 
 @pytest.fixture
 def basic_uncompiled_schema_test_node():
-    return CompiledGenericTestNode(
+    return GenericTestNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -386,9 +379,8 @@ def basic_uncompiled_schema_test_node():
 
 @pytest.fixture
 def basic_compiled_schema_test_node():
-    return CompiledGenericTestNode(
+    return GenericTestNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -426,7 +418,6 @@ def basic_compiled_schema_test_node():
 def basic_uncompiled_schema_test_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1,
         'resource_type': str(NodeType.Test),
         'path': '/root/x/path.sql',
@@ -477,7 +468,6 @@ def basic_uncompiled_schema_test_dict():
 def basic_compiled_schema_test_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1,
         'resource_type': str(NodeType.Test),
         'path': '/root/x/path.sql',
@@ -532,19 +522,19 @@ def test_basic_uncompiled_schema_test(basic_uncompiled_schema_test_node, basic_u
     node = basic_uncompiled_schema_test_node
     node_dict = basic_uncompiled_schema_test_dict
     minimum = minimal_schema_test_dict
-    assert_symmetric(node, node_dict, CompiledGenericTestNode)
+    assert_symmetric(node, node_dict, GenericTestNode)
 
     assert node.empty is False
     assert node.is_refable is False
     assert node.is_ephemeral is False
 
-    assert_from_dict(node, minimum, CompiledGenericTestNode)
+    assert_from_dict(node, minimum, GenericTestNode)
 
 
 def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compiled_schema_test_dict):
     node = basic_compiled_schema_test_node
     node_dict = basic_compiled_schema_test_dict
 
-    assert_symmetric(node, node_dict, CompiledGenericTestNode)
+    assert_symmetric(node, node_dict, GenericTestNode)
     assert node.empty is False
     assert node.is_refable is False
     assert node.is_ephemeral is False
@@ -553,13 +543,13 @@ def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compi
 def test_invalid_extra_schema_test_fields(minimal_schema_test_dict):
     bad_extra = minimal_schema_test_dict
     bad_extra['extra'] = 'extra value'
-    assert_fails_validation(bad_extra, CompiledGenericTestNode)
+    assert_fails_validation(bad_extra, GenericTestNode)
 
 
 def test_invalid_resource_type_schema_test(minimal_schema_test_dict):
     bad_type = minimal_schema_test_dict
     bad_type['resource_type'] = str(NodeType.Model)
-    assert_fails_validation(bad_type, CompiledGenericTestNode)
+    assert_fails_validation(bad_type, GenericTestNode)
 
 
 unchanged_schema_tests = [
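assert_symmetric is the workhorse in these contract tests: it checks that a node serializes to exactly the expected dict and that the dict deserializes back to an equal node, which is what guarantees the class renames do not change the wire format. Roughly, per the local test utils (a sketch, not the exact helper):

    def assert_symmetric(node, node_dict, cls=None):
        cls = cls or node.__class__
        assert node.to_dict(omit_none=True) == node_dict   # serialize
        assert cls.from_dict(node_dict) == node            # round-trip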
diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py
index b5ec79a7aba..ae792cdb718 100644
--- a/test/unit/test_contracts_graph_parsed.py
+++ b/test/unit/test_contracts_graph_parsed.py
@@ -13,23 +13,23 @@
     EmptySnapshotConfig,
     Hook,
 )
-from dbt.contracts.graph.parsed import (
-    ParsedModelNode,
+from dbt.contracts.graph.nodes import (
+    ModelNode,
     DependsOn,
     ColumnInfo,
-    ParsedGenericTestNode,
-    ParsedSnapshotNode,
+    GenericTestNode,
+    SnapshotNode,
     IntermediateSnapshotNode,
     ParsedNodePatch,
-    ParsedMacro,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedSeedNode,
+    Macro,
+    Exposure,
+    Metric,
+    SeedNode,
     Docs,
     MacroDependsOn,
-    ParsedSourceDefinition,
-    ParsedDocumentation,
-    ParsedHookNode,
+    SourceDefinition,
+    Documentation,
+    HookNode,
     ExposureOwner,
     TestMetadata,
 )
@@ -127,7 +127,6 @@ def test_config_same(unrendered_node_config_dict, func):
 def base_parsed_model_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Model),
         'path': '/root/x/path.sql',
@@ -173,9 +172,8 @@ def base_parsed_model_dict():
 
 @pytest.fixture
 def basic_parsed_model_object():
-    return ParsedModelNode(
+    return ModelNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -204,7 +202,6 @@ def basic_parsed_model_object():
 def minimal_parsed_model_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Model),
         'path': '/root/x/path.sql',
@@ -226,7 +223,6 @@ def minimal_parsed_model_dict():
 def complex_parsed_model_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Model),
         'path': '/root/x/path.sql',
@@ -283,9 +279,8 @@ def complex_parsed_model_dict():
 
 @pytest.fixture
 def complex_parsed_model_object():
-    return ParsedModelNode(
+    return ModelNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -347,14 +342,14 @@ def test_invalid_bad_tags(base_parsed_model_dict):
     # bad top-level field
     bad_tags = base_parsed_model_dict
     bad_tags['tags'] = 100
-    assert_fails_validation(bad_tags, ParsedModelNode)
+    assert_fails_validation(bad_tags, ModelNode)
 
 
 def test_invalid_bad_materialized(base_parsed_model_dict):
     # bad nested field
     bad_materialized = base_parsed_model_dict
     bad_materialized['config']['materialized'] = None
-    assert_fails_validation(bad_materialized, ParsedModelNode)
+    assert_fails_validation(bad_materialized, ModelNode)
 
 
 unchanged_nodes = [
@@ -428,20 +423,14 @@ def test_compare_changed_model(func, basic_parsed_model_object):
 def basic_parsed_seed_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Seed),
         'path': '/root/seeds/seed.csv',
         'original_file_path': 'seeds/seed.csv',
         'package_name': 'test',
-        'language': 'sql',
         'raw_code': '',
         'unique_id': 'seed.test.foo',
         'fqn': ['test', 'seeds', 'foo'],
-        'refs': [],
-        'sources': [],
-        'metrics': [],
-        'depends_on': {'macros': [], 'nodes': []},
         'database': 'test_db',
         'description': '',
         'schema': 'test_schema',
@@ -474,21 +463,15 @@ def basic_parsed_seed_dict():
 
 @pytest.fixture
 def basic_parsed_seed_object():
-    return ParsedSeedNode(
+    return SeedNode(
         name='foo',
-        root_path='/root/',
         resource_type=NodeType.Seed,
         path='/root/seeds/seed.csv',
         original_file_path='seeds/seed.csv',
         package_name='test',
-        language='sql',
         raw_code='',
         unique_id='seed.test.foo',
         fqn=['test', 'seeds', 'foo'],
-        refs=[],
-        sources=[],
-        metrics=[],
-        depends_on=DependsOn(),
         database='test_db',
         description='',
         schema='test_schema',
@@ -509,13 +492,11 @@ def basic_parsed_seed_object():
 def minimal_parsed_seed_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Seed),
         'path': '/root/seeds/seed.csv',
         'original_file_path': 'seeds/seed.csv',
         'package_name': 'test',
-        'language': 'sql',
         'raw_code': '',
         'unique_id': 'seed.test.foo',
         'fqn': ['test', 'seeds', 'foo'],
@@ -530,20 +511,14 @@ def minimal_parsed_seed_dict():
 def complex_parsed_seed_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Seed),
         'path': '/root/seeds/seed.csv',
         'original_file_path': 'seeds/seed.csv',
         'package_name': 'test',
-        'language': 'sql',
         'raw_code': '',
         'unique_id': 'seed.test.foo',
         'fqn': ['test', 'seeds', 'foo'],
-        'refs': [],
-        'sources': [],
-        'metrics': [],
-        'depends_on': {'macros': [], 'nodes': []},
         'database': 'test_db',
         'description': 'a description',
         'schema': 'test_schema',
@@ -579,21 +554,15 @@ def complex_parsed_seed_dict():
 
 @pytest.fixture
 def complex_parsed_seed_object():
-    return ParsedSeedNode(
+    return SeedNode(
         name='foo',
-        root_path='/root/',
         resource_type=NodeType.Seed,
         path='/root/seeds/seed.csv',
         original_file_path='seeds/seed.csv',
         package_name='test',
-        language='sql',
         raw_code='',
         unique_id='seed.test.foo',
         fqn=['test', 'seeds', 'foo'],
-        refs=[],
-        sources=[],
-        metrics=[],
-        depends_on=DependsOn(),
         database='test_db',
         description='a description',
         schema='test_schema',
@@ -615,10 +584,13 @@ def complex_parsed_seed_object():
 
 def test_seed_basic(basic_parsed_seed_dict, basic_parsed_seed_object, minimal_parsed_seed_dict):
+    dct = basic_parsed_seed_object.to_dict()
+
     assert_symmetric(basic_parsed_seed_object, basic_parsed_seed_dict)
+
     assert basic_parsed_seed_object.get_materialization() == 'seed'
 
-    assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, ParsedSeedNode)
+    assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, SeedNode)
 
 
 def test_seed_complex(complex_parsed_seed_dict, complex_parsed_seed_object):
@@ -729,9 +701,8 @@ def basic_parsed_model_patch_object():
 
 @pytest.fixture
 def patched_model_object():
-    return ParsedModelNode(
+    return ModelNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -771,7 +742,6 @@ def test_patch_parsed_model(basic_parsed_model_object, basic_parsed_model_patch_
 def minimal_parsed_hook_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'resource_type': str(NodeType.Operation),
         'path': '/root/x/path.sql',
         'original_file_path': '/root/path.sql',
@@ -791,7 +761,6 @@ def minimal_parsed_hook_dict():
 def base_parsed_hook_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Operation),
         'path': '/root/x/path.sql',
@@ -837,9 +806,8 @@ def base_parsed_hook_dict():
 
 @pytest.fixture
 def base_parsed_hook_object():
-    return ParsedHookNode(
+    return HookNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -869,7 +837,6 @@ def base_parsed_hook_object():
 def complex_parsed_hook_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Operation),
         'path': '/root/x/path.sql',
@@ -926,9 +893,8 @@ def complex_parsed_hook_dict():
 
 @pytest.fixture
 def complex_parsed_hook_object():
-    return ParsedHookNode(
+    return HookNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -968,11 +934,11 @@ def test_basic_parsed_hook(minimal_parsed_hook_dict, base_parsed_hook_dict, base
     node_dict = base_parsed_hook_dict
     minimum = minimal_parsed_hook_dict
 
-    assert_symmetric(node, node_dict, ParsedHookNode)
+    assert_symmetric(node, node_dict, HookNode)
     assert node.empty is False
     assert node.is_refable is False
     assert node.get_materialization() == 'view'
-    assert_from_dict(node, minimum, ParsedHookNode)
+    assert_from_dict(node, minimum, HookNode)
     pickle.loads(pickle.dumps(node))
 
 
@@ -989,14 +955,13 @@ def test_complex_parsed_hook(complex_parsed_hook_dict, complex_parsed_hook_objec
 def test_invalid_hook_index_type(base_parsed_hook_dict):
     bad_index = base_parsed_hook_dict
     bad_index['index'] = 'a string!?'
-    assert_fails_validation(bad_index, ParsedHookNode)
+    assert_fails_validation(bad_index, HookNode)
 
 
 @pytest.fixture
 def minimal_parsed_schema_test_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Test),
         'path': '/root/x/path.sql',
@@ -1023,7 +988,6 @@ def minimal_parsed_schema_test_dict():
 def basic_parsed_schema_test_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Test),
         'path': '/root/x/path.sql',
@@ -1069,9 +1033,8 @@ def basic_parsed_schema_test_dict():
 
 @pytest.fixture
 def basic_parsed_schema_test_object():
-    return ParsedGenericTestNode(
+    return GenericTestNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -1100,7 +1063,6 @@ def basic_parsed_schema_test_object():
 def complex_parsed_schema_test_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Test),
         'path': '/root/x/path.sql',
@@ -1163,9 +1125,8 @@ def complex_parsed_schema_test_object():
         severity='WARN'
     )
     cfg._extra.update({'extra_key': 'extra value'})
-    return ParsedGenericTestNode(
+    return GenericTestNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -1201,20 +1162,20 @@ def test_basic_schema_test_node(minimal_parsed_schema_test_dict, basic_parsed_sc
     node = basic_parsed_schema_test_object
     node_dict = basic_parsed_schema_test_dict
     minimum = minimal_parsed_schema_test_dict
-    assert_symmetric(node, node_dict, ParsedGenericTestNode)
+    assert_symmetric(node, node_dict, GenericTestNode)
 
     assert node.empty is False
     assert node.is_ephemeral is False
     assert node.is_refable is False
     assert node.get_materialization() == 'test'
 
-    assert_from_dict(node, minimum, ParsedGenericTestNode)
+    assert_from_dict(node, minimum, GenericTestNode)
     pickle.loads(pickle.dumps(node))
 
 
 def test_complex_schema_test_node(complex_parsed_schema_test_dict, complex_parsed_schema_test_object):
     # this tests for the presence of _extra keys
-    node = complex_parsed_schema_test_object  # ParsedGenericTestNode
+    node = complex_parsed_schema_test_object  # GenericTestNode
     assert(node.config._extra['extra_key'])
     node_dict = complex_parsed_schema_test_dict
     assert_symmetric(node, node_dict)
@@ -1225,13 +1186,13 @@ def test_invalid_column_name_type(complex_parsed_schema_test_dict):
     # bad top-level field
     bad_column_name = complex_parsed_schema_test_dict
     bad_column_name['column_name'] = {}
-    assert_fails_validation(bad_column_name, ParsedGenericTestNode)
+    assert_fails_validation(bad_column_name, GenericTestNode)
 
 
 def test_invalid_severity(complex_parsed_schema_test_dict):
     invalid_config_value = complex_parsed_schema_test_dict
     invalid_config_value['config']['severity'] = 'WERROR'
-    assert_fails_validation(invalid_config_value, ParsedGenericTestNode)
+    assert_fails_validation(invalid_config_value, GenericTestNode)
 
 
 @pytest.fixture
@@ -1459,7 +1420,6 @@ def test_invalid_check_value(basic_check_snapshot_config_dict):
 def basic_timestamp_snapshot_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Snapshot),
         'path': '/root/x/path.sql',
@@ -1516,9 +1476,8 @@ def basic_timestamp_snapshot_dict():
 
 @pytest.fixture
 def basic_timestamp_snapshot_object():
-    return ParsedSnapshotNode(
+    return SnapshotNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -1567,7 +1526,6 @@ def basic_intermediate_timestamp_snapshot_object():
     return IntermediateSnapshotNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -1602,7 +1560,6 @@ def basic_intermediate_timestamp_snapshot_object():
 def basic_check_snapshot_dict():
     return {
         'name': 'foo',
-        'root_path': '/root/',
         'created_at': 1.0,
         'resource_type': str(NodeType.Snapshot),
         'path': '/root/x/path.sql',
@@ -1659,9 +1616,8 @@ def basic_check_snapshot_dict():
 
 @pytest.fixture
 def basic_check_snapshot_object():
-    return ParsedSnapshotNode(
+    return SnapshotNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -1710,7 +1666,6 @@ def basic_intermediate_check_snapshot_object():
     return IntermediateSnapshotNode(
         package_name='test',
-        root_path='/root/',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
         language='sql',
@@ -1746,10 +1701,10 @@ def test_timestamp_snapshot_ok(basic_timestamp_snapshot_dict, basic_timestamp_sn
     node = basic_timestamp_snapshot_object
     inter = basic_intermediate_timestamp_snapshot_object
 
-    assert_symmetric(node, node_dict, ParsedSnapshotNode)
-#    node_from_dict = ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True))
+    assert_symmetric(node, node_dict, SnapshotNode)
+#    node_from_dict = SnapshotNode.from_dict(inter.to_dict(omit_none=True))
 #    node_from_dict.created_at = 1
-    assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
+    assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
     assert node.is_refable is True
     assert node.is_ephemeral is False
     pickle.loads(pickle.dumps(node))
@@ -1760,8 +1715,8 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec
     node = basic_check_snapshot_object
     inter = basic_intermediate_check_snapshot_object
 
-    assert_symmetric(node, node_dict, ParsedSnapshotNode)
-    assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
+    assert_symmetric(node, node_dict, SnapshotNode)
+    assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
     assert node.is_refable is True
     assert node.is_ephemeral is False
     pickle.loads(pickle.dumps(node))
@@ -1770,7 +1725,7 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec
 def test_invalid_snapshot_bad_resource_type(basic_timestamp_snapshot_dict):
     bad_resource_type = basic_timestamp_snapshot_dict
     bad_resource_type['resource_type'] = str(NodeType.Model)
-    assert_fails_validation(bad_resource_type, ParsedSnapshotNode)
+    assert_fails_validation(bad_resource_type, SnapshotNode)
 
 
 def test_basic_parsed_node_patch(basic_parsed_model_patch_object, basic_parsed_model_patch_dict):
@@ -1819,7 +1774,7 @@ def test_populated_parsed_node_patch(populated_parsed_node_patch_dict, populated
 
 class TestParsedMacro(ContractTestCase):
-    ContractType = ParsedMacro
+    ContractType = Macro
 
     def _ok_dict(self):
         return {
@@ -1829,10 +1784,8 @@ def _ok_dict(self):
             'created_at': 1.0,
             'package_name': 'test',
             'macro_sql': '{% macro foo() %}select 1 as id{% endmacro %}',
-            'root_path': '/root/',
             'resource_type': 'macro',
             'unique_id': 'macro.test.foo',
-            'tags': [],
             'depends_on': {'macros': []},
             'meta': {},
             'description': 'my macro description',
@@ -1848,10 +1801,8 @@ def test_ok(self):
             original_file_path='/root/path.sql',
             package_name='test',
             macro_sql='{% macro foo() %}select 1 as id{% endmacro %}',
-            root_path='/root/',
             resource_type=NodeType.Macro,
             unique_id='macro.test.foo',
-            tags=[],
             depends_on=MacroDependsOn(),
             meta={},
             description='my macro description',
@@ -1872,16 +1823,16 @@ def test_invalid_extra_field(self):
 
 
 class TestParsedDocumentation(ContractTestCase):
-    ContractType = ParsedDocumentation
+    ContractType = Documentation
 
     def _ok_dict(self):
         return {
             'block_contents': 'some doc contents',
             'name': 'foo',
+            'resource_type': 'doc',
             'original_file_path': '/root/docs/doc.md',
             'package_name': 'test',
             'path': '/root/docs',
-            'root_path': '/root',
             'unique_id': 'test.foo',
         }
 
@@ -1889,12 +1840,12 @@ def test_ok(self):
         doc_dict = self._ok_dict()
         doc = self.ContractType(
             package_name='test',
-            root_path='/root',
             path='/root/docs',
             original_file_path='/root/docs/doc.md',
             name='foo',
             unique_id='test.foo',
-            block_contents='some doc contents'
+            block_contents='some doc contents',
+            resource_type=NodeType.Documentation,
         )
         self.assert_symmetric(doc, doc_dict)
         pickle.loads(pickle.dumps(doc))
@@ -1914,7 +1865,6 @@ def test_invalid_extra(self):
 def minimum_parsed_source_definition_dict():
     return {
         'package_name': 'test',
-        'root_path': '/root',
         'path': '/root/models/sources.yml',
         'original_file_path': '/root/models/sources.yml',
         'created_at': 1.0,
@@ -1935,7 +1885,6 @@ def minimum_parsed_source_definition_dict():
 def basic_parsed_source_definition_dict():
     return {
         'package_name': 'test',
-        'root_path': '/root',
         'path': '/root/models/sources.yml',
         'original_file_path': '/root/models/sources.yml',
         'created_at': 1.0,
@@ -1964,7 +1913,7 @@ def basic_parsed_source_definition_dict():
 
 @pytest.fixture
 def basic_parsed_source_definition_object():
-    return ParsedSourceDefinition(
+    return SourceDefinition(
         columns={},
         database='some_db',
         description='',
@@ -1977,7 +1926,6 @@ def basic_parsed_source_definition_object():
         path='/root/models/sources.yml',
         quoting=Quoting(),
         resource_type=NodeType.Source,
-        root_path='/root',
         schema='some_schema',
         source_description='my source description',
         source_name='my_source',
@@ -1991,7 +1939,6 @@ def basic_parsed_source_definition_object():
 def complex_parsed_source_definition_dict():
     return {
         'package_name': 'test',
-        'root_path': '/root',
         'path': '/root/models/sources.yml',
         'original_file_path': '/root/models/sources.yml',
         'created_at': 1.0,
@@ -2025,7 +1972,7 @@ def complex_parsed_source_definition_dict():
 
 @pytest.fixture
 def complex_parsed_source_definition_object():
-    return ParsedSourceDefinition(
+    return SourceDefinition(
         columns={},
         database='some_db',
         description='',
@@ -2038,7 +1985,6 @@ def complex_parsed_source_definition_object():
         path='/root/models/sources.yml',
         quoting=Quoting(),
         resource_type=NodeType.Source,
-        root_path='/root',
         schema='some_schema',
         source_description='my source description',
         source_name='my_source',
@@ -2055,32 +2001,32 @@ def test_basic_source_definition(minimum_parsed_source_definition_dict, basic_pa
     node_dict = basic_parsed_source_definition_dict
     minimum = minimum_parsed_source_definition_dict
 
-    assert_symmetric(node, node_dict, ParsedSourceDefinition)
+    assert_symmetric(node, node_dict, SourceDefinition)
     assert node.is_ephemeral is False
     assert node.is_refable is False
     assert node.has_freshness is False
 
-    assert_from_dict(node, minimum, ParsedSourceDefinition)
+    assert_from_dict(node, minimum, SourceDefinition)
     pickle.loads(pickle.dumps(node))
 
 
 def test_invalid_missing(minimum_parsed_source_definition_dict):
     bad_missing_name = minimum_parsed_source_definition_dict
     del bad_missing_name['name']
-    assert_fails_validation(bad_missing_name, ParsedSourceDefinition)
+    assert_fails_validation(bad_missing_name, SourceDefinition)
 
 
 def test_invalid_bad_resource_type(minimum_parsed_source_definition_dict):
     bad_resource_type = minimum_parsed_source_definition_dict
     bad_resource_type['resource_type'] = str(NodeType.Model)
-    assert_fails_validation(bad_resource_type, ParsedSourceDefinition)
+    assert_fails_validation(bad_resource_type, SourceDefinition)
 
 
 def test_complex_source_definition(complex_parsed_source_definition_dict, complex_parsed_source_definition_object):
     node = complex_parsed_source_definition_object
     node_dict = complex_parsed_source_definition_dict
-    assert_symmetric(node, node_dict, ParsedSourceDefinition)
+    assert_symmetric(node, node_dict, SourceDefinition)
 
     assert node.is_ephemeral is False
     assert node.is_refable is False
@@ -2146,10 +2092,10 @@ def minimal_parsed_exposure_dict():
         'meta': {},
         'tags': [],
         'path': 'models/something.yml',
-        'root_path': '/usr/src/app',
         'original_file_path': 'models/something.yml',
         'description': '',
         'created_at': 1.0,
+        'resource_type': 'exposure',
     }
 
 
@@ -2168,11 +2114,11 @@ def basic_parsed_exposure_dict():
         },
         'refs': [],
         'sources': [],
+        'metrics': [],
         'fqn': ['test', 'exposures', 'my_exposure'],
         'unique_id': 'exposure.test.my_exposure',
         'package_name': 'test',
         'path': 'models/something.yml',
-        'root_path': '/usr/src/app',
         'original_file_path': 'models/something.yml',
         'description': '',
         'meta': {},
@@ -2187,14 +2133,14 @@ def basic_parsed_exposure_dict():
 
 @pytest.fixture
 def basic_parsed_exposure_object():
-    return ParsedExposure(
+    return Exposure(
         name='my_exposure',
+        resource_type=NodeType.Exposure,
         type=ExposureType.Notebook,
         fqn=['test', 'exposures', 'my_exposure'],
         unique_id='exposure.test.my_exposure',
         package_name='test',
         path='models/something.yml',
-        root_path='/usr/src/app',
         original_file_path='models/something.yml',
         owner=ExposureOwner(email='test@example.com'),
         description='',
@@ -2230,11 +2176,11 @@ def complex_parsed_exposure_dict():
         },
         'refs': [],
         'sources': [],
+        'metrics': [],
         'fqn': ['test', 'exposures', 'my_exposure'],
         'unique_id': 'exposure.test.my_exposure',
         'package_name': 'test',
         'path': 'models/something.yml',
-        'root_path': '/usr/src/app',
         'original_file_path': 'models/something.yml',
         'config': {
             'enabled': True,
@@ -2245,8 +2191,9 @@ def complex_parsed_exposure_dict():
 
 @pytest.fixture
 def complex_parsed_exposure_object():
-    return ParsedExposure(
+    return Exposure(
         name='my_exposure',
+        resource_type=NodeType.Exposure,
         type=ExposureType.Analysis,
         owner=ExposureOwner(email='test@example.com', name='A Name'),
         maturity=MaturityType.Low,
@@ -2259,7 +2206,6 @@ def complex_parsed_exposure_object():
         unique_id='exposure.test.my_exposure',
         package_name='test',
         path='models/something.yml',
-        root_path='/usr/src/app',
         original_file_path='models/something.yml',
         config=ExposureConfig(),
         unrendered_config={},
@@ -2267,13 +2213,13 @@ def complex_parsed_exposure_object():
 
 def test_basic_parsed_exposure(minimal_parsed_exposure_dict, basic_parsed_exposure_dict, basic_parsed_exposure_object):
-    assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, ParsedExposure)
-    assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, ParsedExposure)
+    assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, Exposure)
+    assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, Exposure)
     pickle.loads(pickle.dumps(basic_parsed_exposure_object))
 
 
 def test_complex_parsed_exposure(complex_parsed_exposure_dict, complex_parsed_exposure_object):
-    assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, ParsedExposure)
+    assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, Exposure)
 
 
 unchanged_parsed_exposures = [
@@ -2318,7 +2264,6 @@ def minimal_parsed_metric_dict():
         'meta': {},
         'tags': [],
         'path': 'models/something.yml',
-        'root_path': '/usr/src/app',
         'original_file_path': 'models/something.yml',
         'description': '',
         'created_at': 1.0,
@@ -2351,7 +2296,6 @@ def basic_parsed_metric_dict():
         'unique_id': 'metric.test.my_metric',
         'package_name': 'test',
         'path': 'models/something.yml',
-        'root_path': '/usr/src/app',
         'original_file_path': 'models/something.yml',
         'description': '',
         'meta': {},
@@ -2366,14 +2310,14 @@ def basic_parsed_metric_dict():
 
 @pytest.fixture
 def basic_parsed_metric_object():
-    return ParsedMetric(
+    return Metric(
         name='my_metric',
+        resource_type=NodeType.Metric,
         calculation_method='count',
         fqn=['test', 'metrics', 'my_metric'],
         unique_id='metric.test.my_metric',
         package_name='test',
         path='models/something.yml',
-        root_path='/usr/src/app',
         original_file_path='models/something.yml',
         description='',
         meta={},
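Exposure and Metric now declare an explicit resource_type rather than implying it from the class, so every fixture dict and constructor above gains one field. Constructing an exposure per those fixtures (the ExposureType import location is assumed):

    from dbt.contracts.graph.nodes import Exposure, ExposureOwner
    from dbt.contracts.graph.unparsed import ExposureType  # assumed import path
    from dbt.node_types import NodeType

    exposure = Exposure(
        name='my_exposure',
        resource_type=NodeType.Exposure,   # newly required
        type=ExposureType.Notebook,
        owner=ExposureOwner(email='test@example.com'),
        fqn=['test', 'exposures', 'my_exposure'],
        unique_id='exposure.test.my_exposure',
        package_name='test',
        path='models/something.yml',
        original_file_path='models/something.yml',
    )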
diff --git a/test/unit/test_contracts_graph_unparsed.py b/test/unit/test_contracts_graph_unparsed.py
index 5c89148cd11..8821b355b71 100644
--- a/test/unit/test_contracts_graph_unparsed.py
+++ b/test/unit/test_contracts_graph_unparsed.py
@@ -24,7 +24,6 @@ def test_ok(self):
             'package_name': 'test',
             'language': 'sql',
             'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}',
-            'root_path': '/root/',
             'resource_type': 'macro',
         }
         macro = self.ContractType(
@@ -33,7 +32,6 @@ def test_ok(self):
             package_name='test',
             language='sql',
             raw_code='{% macro foo() %}select 1 as id{% endmacro %}',
-            root_path='/root/',
             resource_type=NodeType.Macro,
         )
         self.assert_symmetric(macro, macro_dict)
@@ -46,7 +44,6 @@ def test_invalid_missing_field(self):
             # 'package_name': 'test',
             'language': 'sql',
             'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}',
-            'root_path': '/root/',
             'resource_type': 'macro',
         }
         self.assert_fails_validation(macro_dict)
@@ -58,7 +55,6 @@ def test_invalid_extra_field(self):
             'package_name': 'test',
             'language': 'sql',
             'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}',
-            'root_path': '/root/',
             'extra': 'extra',
             'resource_type': 'macro',
         }
@@ -71,7 +67,6 @@ class TestUnparsedNode(ContractTestCase):
     def test_ok(self):
         node_dict = {
             'name': 'foo',
-            'root_path': '/root/',
             'resource_type': NodeType.Model,
             'path': '/root/x/path.sql',
             'original_file_path': '/root/path.sql',
@@ -81,7 +76,6 @@ def test_ok(self):
         }
         node = self.ContractType(
             package_name='test',
-            root_path='/root/',
             path='/root/x/path.sql',
             original_file_path='/root/path.sql',
             language='sql',
@@ -99,7 +93,6 @@ def test_ok(self):
     def test_empty(self):
         node_dict = {
             'name': 'foo',
-            'root_path': '/root/',
             'resource_type': NodeType.Model,
             'path': '/root/x/path.sql',
             'original_file_path': '/root/path.sql',
@@ -109,7 +102,6 @@ def test_empty(self):
         }
         node = UnparsedNode(
             package_name='test',
-            root_path='/root/',
             path='/root/x/path.sql',
             original_file_path='/root/path.sql',
             language='sql',
@@ -126,7 +118,6 @@ def test_empty(self):
     def test_bad_type(self):
         node_dict = {
             'name': 'foo',
-            'root_path': '/root/',
             'resource_type': NodeType.Source,  # not valid!
             'path': '/root/x/path.sql',
             'original_file_path': '/root/path.sql',
@@ -143,7 +134,6 @@ class TestUnparsedRunHook(ContractTestCase):
     def test_ok(self):
         node_dict = {
             'name': 'foo',
-            'root_path': 'test/dbt_project.yml',
             'resource_type': NodeType.Operation,
             'path': '/root/dbt_project.yml',
             'original_file_path': '/root/dbt_project.yml',
@@ -154,7 +144,6 @@ def test_ok(self):
         }
         node = self.ContractType(
             package_name='test',
-            root_path='test/dbt_project.yml',
             path='/root/dbt_project.yml',
             original_file_path='/root/dbt_project.yml',
             language='sql',
@@ -170,7 +159,6 @@ def test_ok(self):
     def test_bad_type(self):
         node_dict = {
             'name': 'foo',
-            'root_path': 'test/dbt_project.yml',
             'resource_type': NodeType.Model,  # invalid
             'path': '/root/dbt_project.yml',
             'original_file_path': '/root/dbt_project.yml',
@@ -365,14 +353,12 @@ class TestUnparsedDocumentationFile(ContractTestCase):
     def test_ok(self):
         doc = self.ContractType(
             package_name='test',
-            root_path='/root',
             path='/root/docs',
             original_file_path='/root/docs/doc.md',
             file_contents='blah blah blah',
         )
         doc_dict = {
             'package_name': 'test',
-            'root_path': '/root',
             'path': '/root/docs',
             'original_file_path': '/root/docs/doc.md',
             'file_contents': 'blah blah blah',
@@ -386,7 +372,6 @@ def test_extra_field(self):
         self.assert_fails_validation({})
         doc_dict = {
             'package_name': 'test',
-            'root_path': '/root',
             'path': '/root/docs',
             'original_file_path': '/root/docs/doc.md',
             'file_contents': 'blah blah blah',
diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py
index 30639473bff..c758e53bda9 100644
--- a/test/unit/test_deps.py
+++ b/test/unit/test_deps.py
@@ -7,21 +7,23 @@
 import dbt.exceptions
 from dbt.deps.git import GitUnpinnedPackage
 from dbt.deps.local import LocalUnpinnedPackage
+from dbt.deps.tarball import TarballUnpinnedPackage
 from dbt.deps.registry import RegistryUnpinnedPackage
 from dbt.clients.registry import is_compatible_version
 from dbt.deps.resolver import resolve_packages
 from dbt.contracts.project import (
     LocalPackage,
+    TarballPackage,
     GitPackage,
     RegistryPackage,
 )
-
 from dbt.contracts.project import PackageConfig
 from dbt.semver import VersionSpecifier
 from dbt.version import get_installed_version
 from dbt.dataclass_schema import ValidationError
 
+
 class TestLocalPackage(unittest.TestCase):
     def test_init(self):
         a_contract = LocalPackage.from_dict({'local': '/path/to/package'})
@@ -33,6 +35,45 @@ def test_init(self):
         self.assertEqual(str(a_pinned), '/path/to/package')
 
 
+class TestTarballPackage(unittest.TestCase):
+    def test_TarballPackage(self):
+        from dbt.contracts.project import RegistryPackageMetadata
+        from mashumaro.exceptions import MissingField
+
+        dict_well_formed_contract = (
+            {'tarball': 'http://example.com',
+             'name': 'my_cool_package'})
+
+        a_contract = (
+            TarballPackage.from_dict(dict_well_formed_contract))
+
+        # check contract and resolver
+        self.assertEqual(a_contract.tarball, 'http://example.com')
+        self.assertEqual(a_contract.name, 'my_cool_package')
+
+        a = TarballUnpinnedPackage.from_contract(a_contract)
+        self.assertEqual(a.tarball, 'http://example.com')
+        self.assertEqual(a.package, 'my_cool_package')
+
+        a_pinned = a.resolved()
+        self.assertEqual(a_pinned.source_type(), 'tarball')
+
+        # check bad contract (no name) fails
+        dict_missing_name_should_fail_on_contract = (
+            {'tarball': 'http://example.com'})
+
+        with self.assertRaises(MissingField):
+            TarballPackage.from_dict(dict_missing_name_should_fail_on_contract)
+
+        # check RegistryPackageMetadata - it is used in TarballUnpinnedPackage
+        dct = {'name': a.package,
+               'packages': [],  # note: required by RegistryPackageMetadata
+               'downloads': {'tarball': a_pinned.tarball}}
+
+        metastore = RegistryPackageMetadata.from_dict(dct)
+        self.assertEqual(metastore.downloads.tarball, 'http://example.com')
+
+
 class TestGitPackage(unittest.TestCase):
     def test_init(self):
         a_contract = GitPackage.from_dict(
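The tarball source adds a third install path alongside git and the registry; a TarballPackage contract maps one-to-one onto a packages.yml entry with tarball: and name: keys. The resolution flow the new test walks, condensed:

    from dbt.contracts.project import TarballPackage
    from dbt.deps.tarball import TarballUnpinnedPackage

    contract = TarballPackage.from_dict(
        {'tarball': 'http://example.com', 'name': 'my_cool_package'})
    unpinned = TarballUnpinnedPackage.from_contract(contract)
    pinned = unpinned.resolved()          # pin the package for install
    assert pinned.source_type() == 'tarball'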
-6,18 +6,18 @@ from pathlib import Path from dbt.contracts.files import FileHash -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( DependsOn, MacroDependsOn, NodeConfig, - ParsedMacro, - ParsedModelNode, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, - ParsedSingularTestNode, - ParsedGenericTestNode, - ParsedSourceDefinition, + Macro, + ModelNode, + Exposure, + Metric, + SeedNode, + SingularTestNode, + GenericTestNode, + SourceDefinition, TestConfig, TestMetadata, ColumnInfo, @@ -42,7 +42,7 @@ MetricSelectorMethod, ) import dbt.exceptions -import dbt.contracts.graph.parsed +import dbt.contracts.graph.nodes from .utils import replace_config @@ -77,7 +77,7 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al source_values.append([src.source_name, src.name]) depends_on_nodes.append(src.unique_id) - return ParsedModelNode( + return ModelNode( language='sql', raw_code=sql, database='dbt', @@ -87,7 +87,6 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al fqn=fqn, unique_id=f'model.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'models/{path}', config=NodeConfig(**config_kwargs), @@ -118,9 +117,7 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr checksum = FileHash.from_contents('') fqn = [pkg] + fqn_extras + [name] - return ParsedSeedNode( - language='sql', - raw_code='', + return SeedNode( database='dbt', schema='dbt_schema', alias=alias, @@ -128,7 +125,6 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr fqn=fqn, unique_id=f'seed.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'data/{path}', tags=tags, @@ -150,13 +146,12 @@ def make_source(pkg, source_name, table_name, path=None, loader=None, identifier fqn = [pkg] + fqn_extras + [source_name, table_name] - return ParsedSourceDefinition( + return SourceDefinition( fqn=fqn, database='dbt', schema='dbt_schema', unique_id=f'source.{pkg}.{source_name}.{table_name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=path, name=table_name, @@ -177,16 +172,14 @@ def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None): if depends_on_macros is None: depends_on_macros = [] - return ParsedMacro( + return Macro( name=name, macro_sql=macro_sql, unique_id=f'macro.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=path, resource_type=NodeType.Macro, - tags=[], depends_on=MacroDependsOn(macros=depends_on_macros), ) @@ -204,7 +197,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No ref_values = [] source_values = [] # this doesn't really have to be correct - if isinstance(test_model, ParsedSourceDefinition): + if isinstance(test_model, SourceDefinition): kwargs['model'] = "{{ source('" + test_model.source_name + \ "', '" + test_model.name + "') }}" source_values.append([test_model.source_name, test_model.name]) @@ -251,7 +244,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No source_values.append([source.source_name, source.name]) depends_on_nodes.append(source.unique_id) - return ParsedGenericTestNode( + return GenericTestNode( language='sql', raw_code=raw_code, test_metadata=TestMetadata( @@ -266,7 +259,6 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No fqn=['minimal', 'schema_test', 
node_name], unique_id=f'test.{pkg}.{node_name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=f'schema_test/{node_name}.sql', original_file_path=f'models/{path}', resource_type=NodeType.Test, @@ -308,7 +300,7 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None source_values.append([src.source_name, src.name]) depends_on_nodes.append(src.unique_id) - return ParsedSingularTestNode( + return SingularTestNode( language='sql', raw_code=sql, database='dbt', @@ -318,7 +310,6 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None fqn=fqn, unique_id=f'test.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'tests/{path}', config=TestConfig(**config_kwargs), @@ -342,14 +333,14 @@ def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): owner = ExposureOwner(email='test@example.com') fqn = [pkg, 'exposures'] + fqn_extras + [name] - return ParsedExposure( + return Exposure( name=name, + resource_type=NodeType.Exposure, type=ExposureType.Notebook, fqn=fqn, unique_id=f'exposure.{pkg}.{name}', package_name=pkg, path=path, - root_path='/usr/src/app', original_file_path=path, owner=owner, ) @@ -359,11 +350,11 @@ def make_metric(pkg, name, path=None): if path is None: path = 'schema.yml' - return ParsedMetric( + return Metric( name=name, + resource_type=NodeType.Metric, path='schema.yml', package_name=pkg, - root_path='/usr/src/app', original_file_path=path, unique_id=f'metric.{pkg}.{name}', fqn=[pkg, 'metrics', name], @@ -978,12 +969,14 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat change_node(manifest, seed.replace(checksum=FileHash( name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'modified') warn_or_error_patch.assert_called_once() - msg = warn_or_error_patch.call_args[0][0] + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'SeedExceedsLimitSamePath' + msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() @@ -992,13 +985,15 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state change_node(manifest, seed.replace(checksum=FileHash( name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_called_once() - msg = warn_or_error_patch.call_args[0][0] + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'SeedIncreased' + msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not 
search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() @@ -1007,11 +1002,11 @@ def test_select_state_changed_seed_checksum_path_to_sha(manifest, previous_state change_node(previous_state.manifest, seed.replace( checksum=FileHash(name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_not_called() - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() diff --git a/test/unit/test_macro_resolver.py b/test/unit/test_macro_resolver.py index 17e1aca6dca..3e0b7622bce 100644 --- a/test/unit/test_macro_resolver.py +++ b/test/unit/test_macro_resolver.py @@ -1,15 +1,15 @@ import unittest from unittest import mock -from dbt.contracts.graph.parsed import ( - ParsedMacro +from dbt.contracts.graph.nodes import ( + Macro ) from dbt.context.macro_resolver import MacroResolver def mock_macro(name, package_name): macro = mock.MagicMock( - __class__=ParsedMacro, + __class__=Macro, package_name=package_name, resource_type='macro', unique_id=f'macro.{package_name}.{name}', diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index cbce93fc052..576a525823b 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py @@ -15,14 +15,14 @@ from dbt.adapters.base.plugin import AdapterPlugin from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import Manifest, ManifestMetadata -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, DependsOn, NodeConfig, - ParsedSeedNode, - ParsedSourceDefinition, - ParsedExposure, - ParsedMetric + SeedNode, + SourceDefinition, + Exposure, + Metric ) from dbt.contracts.graph.unparsed import ( @@ -33,7 +33,6 @@ MetricTime ) -from dbt.contracts.graph.compiled import CompiledModelNode from dbt.events.functions import reset_metadata_vars from dbt.node_types import NodeType @@ -45,9 +44,9 @@ REQUIRED_PARSED_NODE_KEYS = frozenset({ 'alias', 'tags', 'config', 'unique_id', 'refs', 'sources', 'metrics', 'meta', 'depends_on', 'database', 'schema', 'name', 'resource_type', - 'package_name', 'root_path', 'path', 'original_file_path', 'raw_code', 'language', + 'package_name', 'path', 'original_file_path', 'raw_code', 'language', 'description', 'columns', 'fqn', 'build_path', 'compiled_path', 'patch_path', 'docs', - 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', + 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', 'relation_name', }) REQUIRED_COMPILED_NODE_KEYS = frozenset(REQUIRED_PARSED_NODE_KEYS | { @@ -81,7 +80,7 @@ def setUp(self): }) self.exposures = { - 'exposure.root.my_exposure': ParsedExposure( + 'exposure.root.my_exposure': Exposure( name='my_exposure', type=ExposureType.Dashboard, owner=ExposureOwner(email='some@email.com'), @@ -95,14 +94,13 @@ def setUp(self): fqn=['root', 'my_exposure'], unique_id='exposure.root.my_exposure', package_name='root', - root_path='', path='my_exposure.sql', original_file_path='my_exposure.sql' ) } self.metrics = { - 
'metric.root.my_metric': ParsedMetric( + 'metric.root.my_metric': Metric( name='new_customers', label='New Customers', model='ref("multi")', @@ -128,14 +126,13 @@ def setUp(self): fqn=['root', 'my_metric'], unique_id='metric.root.my_metric', package_name='root', - root_path='', path='my_metric.yml', original_file_path='my_metric.yml' ) } self.nested_nodes = { - 'model.snowplow.events': ParsedModelNode( + 'model.snowplow.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -152,13 +149,12 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.events': ParsedModelNode( + 'model.root.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -175,13 +171,12 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.dep': ParsedModelNode( + 'model.root.dep': ModelNode( name='dep', database='dbt', schema='analytics', @@ -198,13 +193,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.nested': ParsedModelNode( + 'model.root.nested': ModelNode( name='nested', database='dbt', schema='analytics', @@ -221,13 +215,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.sibling': ParsedModelNode( + 'model.root.sibling': ModelNode( name='sibling', database='dbt', schema='analytics', @@ -244,13 +237,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.multi': ParsedModelNode( + 'model.root.multi': ModelNode( name='multi', database='dbt', schema='analytics', @@ -267,7 +259,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -276,7 +267,7 @@ def setUp(self): } self.sources = { - 'source.root.my_source.my_table': ParsedSourceDefinition( + 'source.root.my_source.my_table': SourceDefinition( database='raw', schema='analytics', resource_type=NodeType.Source, @@ -289,7 +280,6 @@ def setUp(self): unique_id='source.test.my_source.my_table', fqn=['test', 'my_source', 'my_table'], package_name='root', - root_path='', path='schema.yml', original_file_path='schema.yml', ), @@ -317,7 +307,7 @@ def test__no_nodes(self): metadata=ManifestMetadata(generated_at=datetime.utcnow()), ) - invocation_id = dbt.events.functions.invocation_id + invocation_id = dbt.events.functions.EVENT_MANAGER.invocation_id self.assertEqual( manifest.writable_manifest().to_dict(omit_none=True), { @@ -331,7 +321,7 @@ def test__no_nodes(self): 'child_map': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'env': {ENV_KEY_NAME: 'value'}, 'invocation_id': invocation_id, @@ -434,7 +424,7 @@ def test__build_flat_graph(self): @mock.patch.object(tracking, 'active_user') def test_metadata(self, mock_user): mock_user.id = 
'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' - dbt.events.functions.invocation_id = '01234567-0123-0123-0123-0123456789ab' + dbt.events.functions.EVENT_MANAGER.invocation_id = '01234567-0123-0123-0123-0123456789ab' dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False now = datetime.utcnow() self.assertEqual( @@ -457,7 +447,7 @@ def test_metadata(self, mock_user): @freezegun.freeze_time('2018-02-14T09:15:13Z') def test_no_nodes_with_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' - dbt.events.functions.invocation_id = '01234567-0123-0123-0123-0123456789ab' + dbt.events.functions.EVENT_MANAGER.invocation_id = '01234567-0123-0123-0123-0123456789ab' dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False metadata = ManifestMetadata( project_id='098f6bcd4621d373cade4e832627b4f6', @@ -482,7 +472,7 @@ def test_no_nodes_with_metadata(self, mock_user): 'docs': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'project_id': '098f6bcd4621d373cade4e832627b4f6', 'user_id': 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf', @@ -502,7 +492,7 @@ def test_get_resource_fqns_empty(self): def test_get_resource_fqns(self): nodes = copy.copy(self.nested_nodes) - nodes['seed.root.seed'] = ParsedSeedNode( + nodes['seed.root.seed'] = SeedNode( name='seed', database='dbt', schema='analytics', @@ -511,16 +501,10 @@ def test_get_resource_fqns(self): unique_id='seed.root.seed', fqn=['root', 'seed'], package_name='root', - refs=[['events']], - sources=[], - depends_on=DependsOn(), config=self.model_config, tags=[], path='seed.csv', original_file_path='seed.csv', - root_path='', - language='sql', - raw_code='-- csv --', checksum=FileHash.empty(), ) manifest = Manifest(nodes=nodes, sources=self.sources, macros={}, docs={}, @@ -552,7 +536,7 @@ def test_get_resource_fqns(self): self.assertEqual(resource_fqns, expect) def test__deepcopy_copies_flat_graph(self): - test_node = ParsedModelNode( + test_node = ModelNode( name='events', database='dbt', schema='analytics', @@ -569,7 +553,6 @@ def test__deepcopy_copies_flat_graph(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -597,7 +580,7 @@ def setUp(self): }) self.nested_nodes = { - 'model.snowplow.events': CompiledModelNode( + 'model.snowplow.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -613,7 +596,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', language='sql', raw_code='does not matter', meta={}, @@ -624,7 +606,7 @@ def setUp(self): extra_ctes=[], checksum=FileHash.empty(), ), - 'model.root.events': CompiledModelNode( + 'model.root.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -640,7 +622,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', raw_code='does not matter', meta={}, compiled=True, @@ -651,7 +632,7 @@ def setUp(self): extra_ctes=[], checksum=FileHash.empty(), ), - 'model.root.dep': ParsedModelNode( + 'model.root.dep': ModelNode( name='dep', database='dbt', schema='analytics', @@ -667,13 +648,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.nested': ParsedModelNode( + 'model.root.nested': 
ModelNode( name='nested', database='dbt', schema='analytics', @@ -689,13 +669,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.sibling': ParsedModelNode( + 'model.root.sibling': ModelNode( name='sibling', database='dbt', schema='analytics', @@ -711,13 +690,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.multi': ParsedModelNode( + 'model.root.multi': ModelNode( name='multi', database='dbt', schema='analytics', @@ -733,7 +711,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -763,7 +740,7 @@ def test__no_nodes(self): 'child_map': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'invocation_id': '01234567-0123-0123-0123-0123456789ab', 'env': {ENV_KEY_NAME: 'value'}, diff --git a/test/unit/test_node_types.py b/test/unit/test_node_types.py index fcfb115b9b9..06c27dba7fe 100644 --- a/test/unit/test_node_types.py +++ b/test/unit/test_node_types.py @@ -10,7 +10,7 @@ NodeType.Seed: "seeds", NodeType.RPCCall: "rpcs", NodeType.SqlOperation: "sql operations", - NodeType.Documentation: "docs blocks", + NodeType.Documentation: "docs", NodeType.Source: "sources", NodeType.Macro: "macros", NodeType.Exposure: "exposures", diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 529fbef8b94..19800b7c798 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -1,44 +1,39 @@ -import ipdb +import os import unittest +from copy import deepcopy from unittest import mock -import os import yaml -from copy import deepcopy import dbt.flags import dbt.parser from dbt import tracking from dbt.context.context_config import ContextConfig +from dbt.contracts.files import SourceFile, FileHash, FilePath, SchemaSourceFile +from dbt.contracts.graph.manifest import Manifest +from dbt.contracts.graph.model_config import ( + NodeConfig, TestConfig, SnapshotConfig +) +from dbt.contracts.graph.nodes import ( + ModelNode, Macro, DependsOn, SingularTestNode, SnapshotNode, + AnalysisNode, UnpatchedSourceDefinition +) from dbt.exceptions import CompilationException, ParsingException +from dbt.node_types import NodeType from dbt.parser import ( ModelParser, MacroParser, SingularTestParser, GenericTestParser, SchemaParser, SnapshotParser, AnalysisParser ) +from dbt.parser.generic_test_builders import YamlBlock +from dbt.parser.models import ( + _get_config_call_dict, _shift_sources, _get_exp_sample_result, _get_stable_sample_result, _get_sample_result +) from dbt.parser.schemas import ( TestablePatchParser, SourceParser, AnalysisPatchParser, MacroPatchParser ) from dbt.parser.search import FileBlock -from dbt.parser.generic_test_builders import YamlBlock from dbt.parser.sources import SourcePatcher - -from dbt.node_types import NodeType, ModelLanguage -from dbt.contracts.files import SourceFile, FileHash, FilePath, SchemaSourceFile -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.model_config import ( - NodeConfig, TestConfig, SnapshotConfig -) -from dbt.contracts.graph.parsed import ( - 
ParsedModelNode, ParsedMacro, ParsedNodePatch, DependsOn, ColumnInfo, - ParsedSingularTestNode, ParsedGenericTestNode, ParsedSnapshotNode, - ParsedAnalysisNode, UnpatchedSourceDefinition -) -from dbt.contracts.graph.unparsed import Docs -from dbt.parser.models import ( - _get_config_call_dict, _shift_sources, _get_exp_sample_result, _get_stable_sample_result, _get_sample_result -) -import itertools -from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode, MockSource, MockDocumentation +from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode def get_abs_os_path(unix_path): @@ -60,13 +55,12 @@ def _generate_macros(self): name_sql[name] = sql for name, sql in name_sql.items(): - pm = ParsedMacro( + pm = Macro( name=name, resource_type=NodeType.Macro, unique_id=f'macro.root.{name}', package_name='root', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/root'), path=normalize('macros/macro.sql'), macro_sql=sql, ) @@ -162,7 +156,7 @@ def file_block_for(self, data: str, filename: str, searched: str): return FileBlock(file=source_file) def assert_has_manifest_lengths(self, manifest, macros=3, nodes=0, - sources=0, docs=0, disabled=0): + sources=0, docs=0, disabled=0): self.assertEqual(len(manifest.macros), macros) self.assertEqual(len(manifest.nodes), nodes) self.assertEqual(len(manifest.sources), sources) @@ -174,9 +168,13 @@ def assertEqualNodes(node_one, node_two): node_one_dict = node_one.to_dict() if 'created_at' in node_one_dict: del node_one_dict['created_at'] + if "relation_name" in node_one_dict: + del node_one_dict["relation_name"] node_two_dict = node_two.to_dict() if 'created_at' in node_two_dict: del node_two_dict['created_at'] + if "relation_name" in node_two_dict: + del node_two_dict["relation_name"] # we don't reall care the order of packages, doing this because it is hard to # make config.packages a set instead of a list if 'config' in node_one_dict and 'packages' in node_one_dict['config']: @@ -193,7 +191,6 @@ def assertEqualNodes(node_one, node_two): assert node_one_dict == node_two_dict - SINGLE_TABLE_SOURCE = ''' version: 2 sources: @@ -218,7 +215,6 @@ def assertEqualNodes(node_one, node_two): values: ['red', 'blue', 'green'] ''' - SINGLE_TABLE_MODEL_TESTS = ''' version: 2 models: @@ -236,7 +232,6 @@ def assertEqualNodes(node_one, node_two): arg: 100 ''' - SINGLE_TABLE_SOURCE_PATCH = ''' version: 2 sources: @@ -399,7 +394,7 @@ def setUp(self): patch_path=None, ) nodes = {my_model_node.unique_id: my_model_node} - macros={m.unique_id: m for m in generate_name_macros('root')} + macros = {m.unique_id: m for m in generate_name_macros('root')} self.manifest = Manifest(nodes=nodes, macros=macros) self.manifest.ref_lookup self.parser = SchemaParser( @@ -492,6 +487,138 @@ def test__parse_basic_model_tests(self): self.assertEqual(self.parser.manifest.files[file_id].node_patches, ['model.root.my_model']) +sql_model = """ +{{ config(materialized="table") }} +select 1 as id +""" + +sql_model_parse_error = "{{ SYNTAX ERROR }}" + +python_model = """ +import textblob +import text as a +from torch import b +import textblob.text +import sklearn + +def model(dbt, session): + dbt.config( + materialized='table', + packages=['sklearn==0.1.0'] + ) + df0 = dbt.ref("a_model").to_pandas() + df1 = dbt.ref("my_sql_model").task.limit(2) + df2 = dbt.ref("my_sql_model_1") + df3 = dbt.ref("my_sql_model_2") + df4 = dbt.source("test", 'table1').limit(max=[max(dbt.ref('something'))]) + df5 = 
[dbt.ref('test1')] + + a_dict = {'test2': dbt.ref('test2')} + df5 = {'test2': dbt.ref('test3')} + df6 = [dbt.ref("test4")] + + df = df0.limit(2) + return df +""" + +python_model_config = """ +def model(dbt, session): + dbt.config.get("param_1") + dbt.config.get("param_2") + return dbt.ref("some_model") +""" + +python_model_config_with_defaults = """ +def model(dbt, session): + dbt.config.get("param_None", None) + dbt.config.get("param_Str", "default") + dbt.config.get("param_List", [1, 2]) + return dbt.ref("some_model") +""" + +python_model_single_argument = """ +def model(dbt): + dbt.config(materialized="table") + return dbt.ref("some_model") +""" + +python_model_no_argument = """ +import pandas as pd + +def model(): + return pd.dataframe([1, 2]) +""" + +python_model_incorrect_argument_name = """ +def model(tbd, session): + tbd.config(materialized="table") + return tbd.ref("some_model") +""" + +python_model_multiple_models = """ +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") + +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") +""" + +python_model_incorrect_function_name = """ +def model1(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") +""" + +python_model_empty_file = """ """ + +python_model_multiple_returns = """ +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model"), dbt.ref("some_other_model") +""" + +python_model_no_return = """ +def model(dbt, session): + dbt.config(materialized='table') +""" + +python_model_single_return = """ +import pandas as pd + +def model(dbt, session): + dbt.config(materialized='table') + return pd.dataframe([1, 2]) +""" + +python_model_incorrect_ref = """ +def model(dbt, session): + model_names = ["orders", "customers"] + models = [] + + for model_name in model_names: + models.extend(dbt.ref(model_name)) + + return models[0] +""" + +python_model_default_materialization = """ +import pandas as pd + +def model(dbt, session): + return pd.dataframe([1, 2]) +""" + +python_model_custom_materialization = """ +import pandas as pd + +def model(dbt, session): + dbt.config(materialized="view") + return pd.dataframe([1, 2]) +""" + + class ModelParserTest(BaseParserTest): def setUp(self): super().setUp() @@ -505,13 +632,12 @@ def file_block_for(self, data, filename): return super().file_block_for(data, filename, 'models') def test_basic(self): - raw_code = '{{ config(materialized="table") }}select 1 as id' - block = self.file_block_for(raw_code, 'nested/model_1.sql') + block = self.file_block_for(sql_model, 'nested/model_1.sql') self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedModelNode( + expected = ModelNode( alias='model_1', name='model_1', database='test', @@ -521,11 +647,10 @@ def test_basic(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', - raw_code=raw_code, + raw_code=sql_model, checksum=block.file.checksum, unrendered_config={'materialized': 'table'}, config_call_dict={ @@ -536,41 +661,21 @@ def test_basic(self): file_id = 'snowplow://' + normalize('models/nested/model_1.sql') self.assertIn(file_id, 
self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['model.snowplow.model_1']) - - def test_parse_python_file(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - packages = ['sklearn==0.1.0'] - ) - import textblob - import text as a - from torch import b - import textblob.text - import sklearn - df0 = pandas(dbt.ref("a_model")) - df1 = dbt.ref("my_sql_model").task.limit(2) - df2 = dbt.ref("my_sql_model_1") - df3 = dbt.ref("my_sql_model_2") - df4 = dbt.source("test", 'table1').limit(max = [max(dbt.ref('something'))]) - df5 = [dbt.ref('test1')] - - a_dict = {'test2' : dbt.ref('test2')} - df5 = anotherfunction({'test2' : dbt.ref('test3')}) - df6 = [somethingelse.ref(dbt.ref("test4"))] - - df = df.limit(2) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_sql_model_parse_error(self): + block = self.file_block_for(sql_model_parse_error, 'nested/model_1.sql') + with self.assertRaises(CompilationException): + self.parser.parse_file(block) + + def test_python_model_parse(self): + block = self.file_block_for(python_model, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] # we decided to not detect and auto supply for now since import name doesn't always match library name python_packages = ['sklearn==0.1.0'] - expected = ParsedModelNode( + expected = ModelNode( alias='py_model', name='py_model', database='test', @@ -580,167 +685,120 @@ def model(dbt, session): fqn=['snowplow', 'nested', 'py_model'], package_name='snowplow', original_file_path=normalize('models/nested/py_model.py'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table', packages=python_packages), # config.packages = ['textblob'] path=normalize('nested/py_model.py'), language='python', - raw_code=py_code, + raw_code=python_model, checksum=block.file.checksum, - unrendered_config={'materialized': 'table', 'packages':python_packages}, - config_call_dict={'materialized': 'table', 'packages':python_packages}, - refs=[['a_model'], ['my_sql_model'], ['my_sql_model_1'], ['my_sql_model_2'], ['something'], ['test1'], ['test2'], ['test3'], ['test4']], - sources = [['test', 'table1']], + unrendered_config={'materialized': 'table', 'packages': python_packages}, + config_call_dict={'materialized': 'table', 'packages': python_packages}, + refs=[['a_model'], ['my_sql_model'], ['my_sql_model_1'], ['my_sql_model_2'], ['something'], ['test1'], + ['test2'], ['test3'], ['test4']], + sources=[['test', 'table1']], ) assertEqualNodes(node, expected) file_id = 'snowplow://' + normalize('models/nested/py_model.py') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['model.snowplow.py_model']) - def test_python_model_config_get(self): - py_code = """ -def model(dbt, session): - dbt.config.get("param_1") - dbt.config.get("param_2") - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_config(self): + block = self.file_block_for(python_model_config, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - + self.parser.parse_file(block) node = list(self.parser.manifest.nodes.values())[0] self.assertEqual(node.config.to_dict()["config_keys_used"], ["param_1", "param_2"]) - def 
test_wrong_python_model_def_miss_session(self): - py_code = """ -def model(dbt): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_config_with_defaults(self): + block = self.file_block_for(python_model_config_with_defaults, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + default_values = node.config.to_dict()["config_keys_defaults"] + self.assertIsNone(default_values[0]) + self.assertEqual(default_values[1], "default") + self.assertEqual(default_values[2], [1, 2]) + + def test_python_model_single_argument(self): + block = self.file_block_for(python_model_single_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_wrong_python_model_def_miss_session(self): - py_code = """ -def model(): - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_no_argument(self): + block = self.file_block_for(python_model_no_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_wrong_python_model_def_wrong_arg(self): - """ First argument for python model should be dbt - """ - py_code = """ -def model(dat, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_incorrect_argument_name(self): + block = self.file_block_for(python_model_incorrect_argument_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_multipe_model(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_multiple_models(self): + block = self.file_block_for(python_model_multiple_models, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_no_model(self): - py_code = """ -def model1(dbt, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_incorrect_function_name(self): + block = self.file_block_for(python_model_incorrect_function_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_mutiple_return(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df1, df2 - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_empty_file(self): + block = self.file_block_for(python_model_empty_file, "nested/py_model.py") + self.parser.manifest.files[block.file.file_id] = block.file + self.assertIsNone(self.parser.parse_file(block)) + + def test_python_model_multiple_returns(self): + block = self.file_block_for(python_model_multiple_returns, 
'nested/py_model.py')
        self.parser.manifest.files[block.file.file_id] = block.file
        with self.assertRaises(ParsingException):
            self.parser.parse_file(block)
-
-    def test_wrong_python_model_def_no_return(self):
-        py_code = """
-def model(dbt, session):
-    dbt.config(
-        materialized='table',
-    )
-    """
-        block = self.file_block_for(py_code, 'nested/py_model.py')
+
+    def test_python_model_no_return(self):
+        block = self.file_block_for(python_model_no_return, 'nested/py_model.py')
        self.parser.manifest.files[block.file.file_id] = block.file
        with self.assertRaises(ParsingException):
            self.parser.parse_file(block)

-    def test_correct_python_model_def_return_function(self):
-        py_code = """
-def model(dbt, session):
-    dbt.config(
-        materialized='table',
-    )
-    return pandas.dataframe([1,2])
-    """
-        block = self.file_block_for(py_code, 'nested/py_model.py')
+    def test_python_model_single_return(self):
+        block = self.file_block_for(python_model_single_return, 'nested/py_model.py')
        self.parser.manifest.files[block.file.file_id] = block.file
-        self.parser.parse_file(block)
+        self.assertIsNone(self.parser.parse_file(block))

-    def test_parse_error(self):
-        block = self.file_block_for('{{ SYNTAX ERROR }}', 'nested/model_1.sql')
-        with self.assertRaises(CompilationException):
+    def test_python_model_incorrect_ref(self):
+        block = self.file_block_for(python_model_incorrect_ref, 'nested/py_model.py')
+        self.parser.manifest.files[block.file.file_id] = block.file
+        with self.assertRaises(ParsingException):
            self.parser.parse_file(block)

-    def test_parse_ref_with_non_string(self):
-        py_code = """
-def model(dbt, session):
-
-    model_names = ["orders", "customers"]
-    models = []
-
-    for model_name in model_names:
-        models.extend(dbt.ref(model_name))
+    def test_python_model_default_materialization(self):
+        block = self.file_block_for(python_model_default_materialization, 'nested/py_model.py')
+        self.parser.manifest.files[block.file.file_id] = block.file
+        self.parser.parse_file(block)
+        node = list(self.parser.manifest.nodes.values())[0]
+        self.assertEqual(node.get_materialization(), "table")

-    return models[0]
-    """
-    block = self.file_block_for(py_code, 'nested/py_model.py')
+    def test_python_model_custom_materialization(self):
+        block = self.file_block_for(python_model_custom_materialization, 'nested/py_model.py')
        self.parser.manifest.files[block.file.file_id] = block.file
-    with self.assertRaises(ParsingException):
-        self.parser.parse_file(block)
-
+        self.parser.parse_file(block)
+        node = list(self.parser.manifest.nodes.values())[0]
+        self.assertEqual(node.get_materialization(), "view")

class StaticModelParserTest(BaseParserTest):
    def setUp(self):
@@ -759,20 +817,19 @@ def file_block_for(self, data, filename):
    # parser does not run in this case.
That test is in integration test suite 072 def test_built_in_macro_override_detection(self): macro_unique_id = 'macro.root.ref' - self.parser.manifest.macros[macro_unique_id] = ParsedMacro( + self.parser.manifest.macros[macro_unique_id] = Macro( name='ref', resource_type=NodeType.Macro, unique_id=macro_unique_id, package_name='root', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/root'), path=normalize('macros/macro.sql'), macro_sql='{% macro ref(model_name) %}{% set x = raise("boom") %}{% endmacro %}', ) raw_code = '{{ config(materialized="table") }}select 1 as id' block = self.file_block_for(raw_code, 'nested/model_1.sql') - node = ParsedModelNode( + node = ModelNode( alias='model_1', name='model_1', database='test', @@ -782,7 +839,6 @@ def test_built_in_macro_override_detection(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -791,9 +847,10 @@ def test_built_in_macro_override_detection(self): unrendered_config={'materialized': 'table'}, ) - assert(self.parser._has_banned_macro(node)) + assert (self.parser._has_banned_macro(node)) -# TODO + +# TODO class StaticModelParserUnitTest(BaseParserTest): # _get_config_call_dict # _shift_sources @@ -808,7 +865,7 @@ def setUp(self): manifest=self.manifest, root_project=self.root_project_config, ) - self.example_node = ParsedModelNode( + self.example_node = ModelNode( alias='model_1', name='model_1', database='test', @@ -818,7 +875,6 @@ def setUp(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -969,7 +1025,8 @@ def file_block_for(self, data, filename): return super().file_block_for(data, filename, 'snapshots') def test_parse_error(self): - block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', 'nested/snap_1.sql') + block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', + 'nested/snap_1.sql') with self.assertRaises(CompilationException): self.parser.parse_file(block) @@ -988,7 +1045,7 @@ def test_single_block(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedSnapshotNode( + expected = SnapshotNode( alias='foo', name='foo', # the `database` entry is overrridden by the target_database config @@ -999,7 +1056,6 @@ def test_single_block(self): fqn=['snowplow', 'nested', 'snap_1', 'foo'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1020,10 +1076,10 @@ def test_single_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1058,7 +1114,7 @@ def test_multi_block(self): self.parser.parse_file(block) 
self.assert_has_manifest_lengths(self.parser.manifest, nodes=2) nodes = sorted(self.parser.manifest.nodes.values(), key=lambda n: n.name) - expect_foo = ParsedSnapshotNode( + expect_foo = SnapshotNode( alias='foo', name='foo', database='dbt', @@ -1068,7 +1124,6 @@ def test_multi_block(self): fqn=['snowplow', 'nested', 'snap_1', 'foo'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1089,14 +1144,14 @@ def test_multi_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) - expect_bar = ParsedSnapshotNode( + expect_bar = SnapshotNode( alias='bar', name='bar', database='dbt', @@ -1106,7 +1161,6 @@ def test_multi_block(self): fqn=['snowplow', 'nested', 'snap_1', 'bar'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1127,10 +1181,10 @@ def test_multi_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1160,13 +1214,12 @@ def test_single_block(self): self.parser.parse_file(block) self.assertEqual(len(self.parser.manifest.macros), 1) macro = list(self.parser.manifest.macros.values())[0] - expected = ParsedMacro( + expected = Macro( name='foo', resource_type=NodeType.Macro, unique_id='macro.snowplow.foo', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql=raw_code, ) @@ -1183,23 +1236,21 @@ def test_multiple_blocks(self): self.parser.parse_file(block) self.assertEqual(len(self.parser.manifest.macros), 2) macros = sorted(self.parser.manifest.macros.values(), key=lambda m: m.name) - expected_bar = ParsedMacro( + expected_bar = Macro( name='bar', resource_type=NodeType.Macro, unique_id='macro.snowplow.bar', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql='{% macro bar(c, d) %}c + d{% endmacro %}', ) - expected_foo = ParsedMacro( + expected_foo = Macro( name='foo', resource_type=NodeType.Macro, unique_id='macro.snowplow.foo', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql='{% macro foo(a, b) %}a ~ b{% endmacro %}', ) @@ -1232,7 +1283,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedSingularTestNode( + expected = SingularTestNode( alias='test_1', name='test_1', database='test', @@ -1242,7 +1293,6 @@ def test_basic(self): fqn=['snowplow', 'test_1'], package_name='snowplow', original_file_path=normalize('tests/test_1.sql'), - 
root_path=get_abs_os_path('./dbt_packages/snowplow'), refs=[['blah']], config=TestConfig(severity='ERROR'), tags=[], @@ -1259,7 +1309,7 @@ def test_basic(self): class GenericTestParserTest(BaseParserTest): -# generic tests in the test-paths directory currently leverage the macro parser + # generic tests in the test-paths directory currently leverage the macro parser def setUp(self): super().setUp() self.parser = GenericTestParser( @@ -1276,13 +1326,12 @@ def test_basic(self): self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) node = list(self.parser.manifest.macros.values())[0] - expected = ParsedMacro( + expected = Macro( name='test_not_null', resource_type=NodeType.Macro, unique_id='macro.snowplow.test_not_null', package_name='snowplow', original_file_path=normalize('tests/generic/test_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('tests/generic/test_1.sql'), macro_sql=raw_code, ) @@ -1311,7 +1360,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedAnalysisNode( + expected = AnalysisNode( alias='analysis_1', name='analysis_1', database='test', @@ -1321,7 +1370,6 @@ def test_basic(self): fqn=['snowplow', 'analysis', 'nested', 'analysis_1'], package_name='snowplow', original_file_path=normalize('analyses/nested/analysis_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), depends_on=DependsOn(), config=NodeConfig(), path=normalize('analysis/nested/analysis_1.sql'), @@ -1329,10 +1377,9 @@ def test_basic(self): raw_code=raw_code, checksum=block.file.checksum, unrendered_config={}, + relation_name=None, ) assertEqualNodes(node, expected) - file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') + file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['analysis.snowplow.analysis_1']) - - diff --git a/test/unit/test_partial_parsing.py b/test/unit/test_partial_parsing.py index de0e230ad3c..34e85b0cef0 100644 --- a/test/unit/test_partial_parsing.py +++ b/test/unit/test_partial_parsing.py @@ -5,7 +5,7 @@ import dbt.exceptions from dbt.parser.partial import PartialParsing from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedModelNode +from dbt.contracts.graph.nodes import ModelNode from dbt.contracts.files import ParseFileType, SourceFile, SchemaSourceFile, FilePath, FileHash from dbt.node_types import NodeType from .utils import normalize @@ -88,9 +88,8 @@ def setUp(self): self.partial_parsing = PartialParsing(self.saved_manifest, self.new_files) def get_model(self, name): - return ParsedModelNode( + return ModelNode( package_name='my_test', - root_path='/users/root/', path=f'{name}.sql', original_file_path=f'models/{name}.sql', language='sql', @@ -107,9 +106,8 @@ def get_model(self, name): ) def get_python_model(self, name): - return ParsedModelNode( + return ModelNode( package_name='my_test', - root_path='/users/root/', path=f'{name}.py', original_file_path=f'models/{name}.py', raw_code='import something', diff --git a/test/unit/utils.py b/test/unit/utils.py index 521a83e329c..e1512abee2d 100644 --- a/test/unit/utils.py +++ b/test/unit/utils.py @@ -225,7 +225,7 @@ def assert_fails_validation(dct, cls): def generate_name_macros(package): - from dbt.contracts.graph.parsed import ParsedMacro + 
from dbt.contracts.graph.nodes import Macro from dbt.node_types import NodeType name_sql = {} for component in ('database', 'schema', 'alias'): @@ -238,13 +238,12 @@ def generate_name_macros(package): name_sql[name] = sql for name, sql in name_sql.items(): - pm = ParsedMacro( + pm = Macro( name=name, resource_type=NodeType.Macro, unique_id=f'macro.{package}.{name}', package_name=package, original_file_path=normalize('macros/macro.sql'), - root_path='./dbt_packages/root', path=normalize('macros/macro.sql'), macro_sql=sql, ) @@ -274,7 +273,7 @@ def _make_table_of(self, rows, column_types): def MockMacro(package, name='my_macro', **kwargs): - from dbt.contracts.graph.parsed import ParsedMacro + from dbt.contracts.graph.nodes import Macro from dbt.node_types import NodeType mock_kwargs = dict( @@ -287,7 +286,7 @@ def MockMacro(package, name='my_macro', **kwargs): mock_kwargs.update(kwargs) macro = mock.MagicMock( - spec=ParsedMacro, + spec=Macro, **mock_kwargs ) macro.name = name @@ -308,9 +307,9 @@ def MockGenerateMacro(package, component='some_component', **kwargs): def MockSource(package, source_name, name, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedSourceDefinition + from dbt.contracts.graph.nodes import SourceDefinition src = mock.MagicMock( - __class__=ParsedSourceDefinition, + __class__=SourceDefinition, resource_type=NodeType.Source, source_name=source_name, package_name=package, @@ -324,13 +323,13 @@ def MockSource(package, source_name, name, **kwargs): def MockNode(package, name, resource_type=None, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSeedNode + from dbt.contracts.graph.nodes import ModelNode, SeedNode if resource_type is None: resource_type = NodeType.Model if resource_type == NodeType.Model: - cls = ParsedModelNode + cls = ModelNode elif resource_type == NodeType.Seed: - cls = ParsedSeedNode + cls = SeedNode else: raise ValueError(f'I do not know how to handle {resource_type}') node = mock.MagicMock( @@ -347,9 +346,9 @@ def MockNode(package, name, resource_type=None, **kwargs): def MockDocumentation(package, name, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedDocumentation + from dbt.contracts.graph.nodes import Documentation doc = mock.MagicMock( - __class__=ParsedDocumentation, + __class__=Documentation, resource_type=NodeType.Documentation, package_name=package, search_name=name, diff --git a/tests/CONVERTING.md b/tests/CONVERTING.md index 89801fc74b9..44057cad05b 100644 --- a/tests/CONVERTING.md +++ b/tests/CONVERTING.md @@ -30,7 +30,7 @@ * some of the legacy tests used a 'default_project' method to change (for example) the seeds directory to load a different seed. Don't do that. Copying a file is probably a better option. - +* If there are more than 50 lines of fixture strings, they should be defined in a fixtures.py and then imported. We definitely don't do this everywhere right now but should move to this model. 
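
As a loose sketch of that convention (the directory, module, and test names here are hypothetical illustrations, not part of this change), a converted test might keep its long strings in a sibling `fixtures.py`:

```python
# tests/functional/example/fixtures.py -- hypothetical module holding the fixture strings
my_model_sql = """
select 1 as id
"""

my_seed_csv = """id,msg
1,hello
2,goodbye
"""
```

and the test module would then import them instead of defining them inline:

```python
# tests/functional/example/test_example.py -- hypothetical test importing the fixtures
import pytest

from dbt.tests.util import run_dbt
from tests.functional.example.fixtures import my_model_sql, my_seed_csv  # assumed path


class TestExample:
    @pytest.fixture(scope="class")
    def models(self):
        # file name -> file contents, as elsewhere in this test framework
        return {"my_model.sql": my_model_sql}

    @pytest.fixture(scope="class")
    def seeds(self):
        return {"my_seed.csv": my_seed_csv}

    def test_run(self, project):
        # `project` is the fixture provided by dbt's functional-test harness
        run_dbt(["seed"])
        results = run_dbt(["run"])
        assert len(results) == 1
```

This keeps the test module itself readable and lets several test files share one set of fixture strings.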
# Integration test directories that have been converted * 001\_simple\_copy\_tests => moved to 'basic' diff --git a/tests/adapter/dbt/tests/adapter/__version__.py b/tests/adapter/dbt/tests/adapter/__version__.py index 70ba273f562..27cfeecd9e8 100644 --- a/tests/adapter/dbt/tests/adapter/__version__.py +++ b/tests/adapter/dbt/tests/adapter/__version__.py @@ -1 +1 @@ -version = "1.4.0a1" +version = "1.4.0b1" diff --git a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py index d9ff6b5b28f..a9f846e2ca4 100644 --- a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py +++ b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py @@ -50,7 +50,10 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } def test_alias_model_name(self, project): results = run_dbt(["run"]) @@ -68,7 +71,10 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): @@ -94,7 +100,10 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): @@ -121,14 +130,19 @@ def project_config_update(self, unique_schema): "models": { "test": { "alias": "duped_alias", - "model_b": {"schema": unique_schema + "_alt"}, + "model_b": { + "schema": unique_schema + "_alt" + }, }, }, } @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): diff --git a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py new file mode 100644 index 00000000000..b7b0ff9ac17 --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py @@ -0,0 +1,107 @@ +import pytest +import os +import re +import yaml +from dbt.tests.util import run_dbt + +MODELS__MODEL_SQL = """ +seled 1 as id +""" + + +class BaseDebug: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": MODELS__MODEL_SQL} + + @pytest.fixture(autouse=True) + def capsys(self, capsys): + self.capsys = capsys + + def assertGotValue(self, linepat, result): + found = False + output = self.capsys.readouterr().out + for line in output.split('\n'): + if linepat.match(line): + found = True + assert result in line + if not found: + with pytest.raises(Exception) as exc: + msg = f"linepat {linepat} not found in stdout: {output}" + assert msg in str(exc.value) + + def check_project(self, splitout, msg="ERROR invalid"): + for line in splitout: + if line.strip().startswith("dbt_project.yml file"): + assert msg in line + elif line.strip().startswith("profiles.yml file"): + assert "ERROR invalid" not in line + + +class BaseDebugProfileVariable(BaseDebug): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + 
"config-version": 2, + "profile": '{{ "te" ~ "st" }}' + } + + +class TestDebugPostgres(BaseDebug): + def test_ok(self, project): + run_dbt(["debug"]) + assert "ERROR" not in self.capsys.readouterr().out + + def test_nopass(self, project): + run_dbt(["debug", "--target", "nopass"], expect_pass=False) + self.assertGotValue(re.compile(r"\s+profiles\.yml file"), "ERROR invalid") + + def test_wronguser(self, project): + run_dbt(["debug", "--target", "wronguser"], expect_pass=False) + self.assertGotValue(re.compile(r"\s+Connection test"), "ERROR") + + def test_empty_target(self, project): + run_dbt(["debug", "--target", "none_target"], expect_pass=False) + self.assertGotValue(re.compile(r"\s+output 'none_target'"), "misconfigured") + + +class TestDebugProfileVariablePostgres(BaseDebugProfileVariable): + pass + + +class TestDebugInvalidProjectPostgres(BaseDebug): + + def test_empty_project(self, project): + with open("dbt_project.yml", "w") as f: # noqa: F841 + pass + + run_dbt(["debug", "--profile", "test"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout) + + def test_badproject(self, project): + update_project = {"invalid-key": "not a valid key so this is bad project"} + + with open("dbt_project.yml", "w") as f: + yaml.safe_dump(update_project, f) + + run_dbt(["debug", "--profile", "test"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout) + + def test_not_found_project(self, project): + run_dbt(["debug", "--project-dir", "nopass"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout, msg="ERROR not found") + + def test_invalid_project_outside_current_dir(self, project): + # create a dbt_project.yml + project_config = { + "invalid-key": "not a valid key in this project" + } + os.makedirs("custom", exist_ok=True) + with open("custom/dbt_project.yml", "w") as f: + yaml.safe_dump(project_config, f, default_flow_style=True) + run_dbt(["debug", "--project-dir", "custom"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout) diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py new file mode 100644 index 00000000000..11a4b6c0384 --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py @@ -0,0 +1,154 @@ +import pytest +from dbt.tests.util import run_dbt, check_relations_equal +from collections import namedtuple + + +models__delete_insert_incremental_predicates_sql = """ +{{ config( + materialized = 'incremental', + unique_key = 'id' +) }} + +{% if not is_incremental() %} + +select 1 as id, 'hello' as msg, 'blue' as color +union all +select 2 as id, 'goodbye' as msg, 'red' as color + +{% else %} + +-- delete will not happen on the above record where id = 2, so new record will be inserted instead +select 1 as id, 'hey' as msg, 'blue' as color +union all +select 2 as id, 'yo' as msg, 'green' as color +union all +select 3 as id, 'anyway' as msg, 'purple' as color + +{% endif %} +""" + +seeds__expected_delete_insert_incremental_predicates_csv = """id,msg,color +1,hey,blue +2,goodbye,red +2,yo,green +3,anyway,purple +""" + +ResultHolder = namedtuple( + "ResultHolder", + [ + "seed_count", + "model_count", + "seed_rows", + "inc_test_model_count", + "opt_model_count", + "relation", + ], +) + + +class BaseIncrementalPredicates: + 
diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py
new file mode 100644
index 00000000000..11a4b6c0384
--- /dev/null
+++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py
@@ -0,0 +1,157 @@
+import pytest
+from dbt.tests.util import run_dbt, check_relations_equal
+from collections import namedtuple
+
+
+models__delete_insert_incremental_predicates_sql = """
+{{ config(
+    materialized = 'incremental',
+    unique_key = 'id'
+) }}
+
+{% if not is_incremental() %}
+
+select 1 as id, 'hello' as msg, 'blue' as color
+union all
+select 2 as id, 'goodbye' as msg, 'red' as color
+
+{% else %}
+
+-- delete will not happen on the above record where id = 2, so new record will be inserted instead
+select 1 as id, 'hey' as msg, 'blue' as color
+union all
+select 2 as id, 'yo' as msg, 'green' as color
+union all
+select 3 as id, 'anyway' as msg, 'purple' as color
+
+{% endif %}
+"""
+
+# the predicate (id != 2) configured below blocks the delete step for id = 2,
+# so the first-run row (2, goodbye, red) survives alongside the newly inserted
+# (2, yo, green) -- which is why id = 2 appears twice in the expected seed
+seeds__expected_delete_insert_incremental_predicates_csv = """id,msg,color
+1,hey,blue
+2,goodbye,red
+2,yo,green
+3,anyway,purple
+"""
+
+ResultHolder = namedtuple(
+    "ResultHolder",
+    [
+        "seed_count",
+        "model_count",
+        "seed_rows",
+        "inc_test_model_count",
+        "opt_model_count",
+        "relation",
+    ],
+)
+
+
+class BaseIncrementalPredicates:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "delete_insert_incremental_predicates.sql": models__delete_insert_incremental_predicates_sql
+        }
+
+    @pytest.fixture(scope="class")
+    def seeds(self):
+        return {
+            "expected_delete_insert_incremental_predicates.csv": seeds__expected_delete_insert_incremental_predicates_csv
+        }
+
+    @pytest.fixture(scope="class")
+    def project_config_update(self):
+        return {
+            "models": {
+                "+incremental_predicates": [
+                    "id != 2"
+                ],
+                "+incremental_strategy": "delete+insert"
+            }
+        }
+
+    def update_incremental_model(self, incremental_model):
+        """update incremental model after the seed table has been updated"""
+        model_result_set = run_dbt(["run", "--select", incremental_model])
+        return len(model_result_set)
+
+    def get_test_fields(
+        self, project, seed, incremental_model, update_sql_file, opt_model_count=None
+    ):
+
+        seed_count = len(run_dbt(["seed", "--select", seed, "--full-refresh"]))
+
+        model_count = len(run_dbt(["run", "--select", incremental_model, "--full-refresh"]))
+        # pass on kwarg
+        relation = incremental_model
+        # update seed in anticipation of incremental model update
+        row_count_query = "select * from {}.{}".format(project.test_schema, seed)
+        # project.run_sql_file(Path("seeds") / Path(update_sql_file + ".sql"))
+        seed_rows = len(project.run_sql(row_count_query, fetch="all"))
+
+        # propagate seed state to incremental model according to unique keys
+        inc_test_model_count = self.update_incremental_model(incremental_model=incremental_model)
+
+        return ResultHolder(
+            seed_count, model_count, seed_rows, inc_test_model_count, opt_model_count, relation
+        )
+
+    def check_scenario_correctness(self, expected_fields, test_case_fields, project):
+        """Invoke assertions to verify correct build functionality"""
+        # 1. test seed(s) should build afresh
+        assert expected_fields.seed_count == test_case_fields.seed_count
+        # 2. test model(s) should build afresh
+        assert expected_fields.model_count == test_case_fields.model_count
+        # 3. seeds should have intended row counts post update
+        assert expected_fields.seed_rows == test_case_fields.seed_rows
+        # 4. incremental test model(s) should be updated
+        assert expected_fields.inc_test_model_count == test_case_fields.inc_test_model_count
+        # 5. extra incremental model(s) should be built; optional since
+        # comparison may be between an incremental model and seed
+        if expected_fields.opt_model_count and test_case_fields.opt_model_count:
+            assert expected_fields.opt_model_count == test_case_fields.opt_model_count
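
`BaseIncrementalPredicates` is likewise meant to be subclassed by adapter plugins: overriding `project_config_update` is enough to exercise an adapter-specific strategy, exactly as `TestPredicatesDeleteInsert` above swaps `+incremental_predicates` for the `+predicates` alias. A sketch for an adapter with a merge-based strategy (hypothetical class name; the qualified predicate string is illustrative and assumes the adapter supports the `merge` strategy):

    import pytest

    from dbt.tests.adapter.incremental.test_incremental_predicates import (
        BaseIncrementalPredicates,
    )


    class TestIncrementalPredicatesMergeMyAdapter(BaseIncrementalPredicates):
        @pytest.fixture(scope="class")
        def project_config_update(self):
            return {
                "models": {
                    # illustrative: merge-style strategies typically need the
                    # predicate qualified against the merge target
                    "+incremental_predicates": ["dbt_internal_dest.id != 2"],
                    "+incremental_strategy": "merge",
                }
            }

The inherited `test__incremental_predicates` then runs unchanged against the new configuration.
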
+        # 6. result table should match intended result set (itself a relation)
+        check_relations_equal(
+            project.adapter, [expected_fields.relation, test_case_fields.relation]
+        )
+
+    def get_expected_fields(self, relation, seed_rows, opt_model_count=None):
+        return ResultHolder(
+            seed_count=1,
+            model_count=1,
+            inc_test_model_count=1,
+            seed_rows=seed_rows,
+            opt_model_count=opt_model_count,
+            relation=relation
+        )
+
+    # no unique_key test
+    def test__incremental_predicates(self, project):
+        """seed should match model after two incremental runs"""
+
+        expected_fields = self.get_expected_fields(relation="expected_delete_insert_incremental_predicates", seed_rows=4)
+        test_case_fields = self.get_test_fields(
+            project, seed="expected_delete_insert_incremental_predicates", incremental_model="delete_insert_incremental_predicates", update_sql_file=None
+        )
+        self.check_scenario_correctness(expected_fields, test_case_fields, project)
+
+
+class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates):
+    pass
+
+
+class TestPredicatesDeleteInsert(BaseIncrementalPredicates):
+    @pytest.fixture(scope="class")
+    def project_config_update(self):
+        return {
+            "models": {
+                "+predicates": [
+                    "id != 2"
+                ],
+                "+incremental_strategy": "delete+insert"
+            }
+        }
diff --git a/tests/adapter/setup.py b/tests/adapter/setup.py
index ddb664d6989..f9ac627e445 100644
--- a/tests/adapter/setup.py
+++ b/tests/adapter/setup.py
@@ -20,7 +20,7 @@
 
 package_name = "dbt-tests-adapter"
-package_version = "1.4.0a1"
+package_version = "1.4.0b1"
 description = """The dbt adapter tests for adapter plugins"""
 
 this_directory = os.path.abspath(os.path.dirname(__file__))
diff --git a/tests/functional/artifacts/data/state/v8/manifest.json b/tests/functional/artifacts/data/state/v8/manifest.json
new file mode 100644
index 00000000000..58e3f04da3c
--- /dev/null
+++ b/tests/functional/artifacts/data/state/v8/manifest.json
@@ -0,0 +1 @@
+{"metadata": {"dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v8.json", "dbt_version": "1.4.0a1", "generated_at": "2022-12-12T13:54:37.804887Z", "invocation_id": "843eaaec-db3b-4406-87ec-a3651f124d69", "env": {}, "project_id": "098f6bcd4621d373cade4e832627b4f6", "user_id": null, "send_anonymous_usage_stats": false, "adapter_type": "postgres"}, "nodes": {"model.test.my_model": {"database": "dbt", "schema": "test16708532772964762671_test_previous_version_state", "name": "my_model", "resource_type": "model", "package_name": "test", "path": "my_model.sql", "original_file_path": "models/my_model.sql", "unique_id": "model.test.my_model", "fqn": ["test", "my_model"], "alias": "my_model", "checksum": {"name": "sha256", "checksum": "2b9123e04ab8bb798f7c565afdc3ee0e56fcd66b4bfbdb435b4891c878d947c5"}, "config": {"enabled": true, "alias": null, "schema": null, "database": null, "tags": [], "meta": {}, "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "quoting": {}, "column_types": {}, "full_refresh": null, "unique_key": null, "on_schema_change": "ignore", "grants": {}, "packages": [], "docs": {"show": true, "node_color": null}, "post-hook": [], "pre-hook": []}, "tags": [], "description": "", "columns": {}, "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "build_path": null, "deferred": false, "unrendered_config": {}, "created_at": 1670853278.478401, "relation_name": "\"dbt\".\"test16708532772964762671_test_previous_version_state\".\"my_model\"", "raw_code": "select 1 as id", "language": "sql", "refs": [], "sources": [], "metrics": [], "depends_on":
{"macros": [], "nodes": []}, "compiled_path": null}}, "sources": {}, "macros": {"macro.dbt_postgres.postgres__current_timestamp": {"name": "postgres__current_timestamp", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp", "macro_sql": "{% macro postgres__current_timestamp() -%}\n now()\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.828495, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_string_as_time": {"name": "postgres__snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_string_as_time", "macro_sql": "{% macro postgres__snapshot_string_as_time(timestamp) -%}\n {%- set result = \"'\" ~ timestamp ~ \"'::timestamp without time zone\" -%}\n {{ return(result) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829041, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_get_time": {"name": "postgres__snapshot_get_time", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_get_time", "macro_sql": "{% macro postgres__snapshot_get_time() -%}\n {{ current_timestamp() }}::timestamp without time zone\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829317, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_backcompat": {"name": "postgres__current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp_backcompat", "macro_sql": "{% macro postgres__current_timestamp_backcompat() %}\n current_timestamp::{{ type_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829592, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat": {"name": "postgres__current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro postgres__current_timestamp_in_utc_backcompat() %}\n (current_timestamp at time zone 'utc')::{{ type_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829864, "supported_languages": null}, "macro.dbt_postgres.postgres__get_catalog": {"name": "postgres__get_catalog", "resource_type": "macro", 
"package_name": "dbt_postgres", "path": "macros/catalog.sql", "original_file_path": "macros/catalog.sql", "unique_id": "macro.dbt_postgres.postgres__get_catalog", "macro_sql": "{% macro postgres__get_catalog(information_schema, schemas) -%}\n\n {%- call statement('catalog', fetch_result=True) -%}\n {#\n If the user has multiple databases set and the first one is wrong, this will fail.\n But we won't fail in the case where there are multiple quoting-difference-only dbs, which is better.\n #}\n {% set database = information_schema.database %}\n {{ adapter.verify_database(database) }}\n\n select\n '{{ database }}' as table_database,\n sch.nspname as table_schema,\n tbl.relname as table_name,\n case tbl.relkind\n when 'v' then 'VIEW'\n else 'BASE TABLE'\n end as table_type,\n tbl_desc.description as table_comment,\n col.attname as column_name,\n col.attnum as column_index,\n pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,\n col_desc.description as column_comment,\n pg_get_userbyid(tbl.relowner) as table_owner\n\n from pg_catalog.pg_namespace sch\n join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid\n join pg_catalog.pg_attribute col on col.attrelid = tbl.oid\n left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)\n left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)\n\n where (\n {%- for schema in schemas -%}\n upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n and not pg_is_other_temp_schema(sch.oid) -- not a temporary schema belonging to another session\n and tbl.relpersistence in ('p', 'u') -- [p]ermanent table or [u]nlogged table. Exclude [t]emporary tables\n and tbl.relkind in ('r', 'v', 'f', 'p') -- o[r]dinary table, [v]iew, [f]oreign table, [p]artitioned table. 
Other values are [i]ndex, [S]equence, [c]omposite type, [t]OAST table, [m]aterialized view\n and col.attnum > 0 -- negative numbers are used for system columns such as oid\n and not col.attisdropped -- column as not been dropped\n\n order by\n sch.nspname,\n tbl.relname,\n col.attnum\n\n {%- endcall -%}\n\n {{ return(load_result('catalog').table) }}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.832119, "supported_languages": null}, "macro.dbt_postgres.postgres_get_relations": {"name": "postgres_get_relations", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/relations.sql", "original_file_path": "macros/relations.sql", "unique_id": "macro.dbt_postgres.postgres_get_relations", "macro_sql": "{% macro postgres_get_relations () -%}\n\n {#\n -- in pg_depend, objid is the dependent, refobjid is the referenced object\n -- > a pg_depend entry indicates that the referenced object cannot be\n -- > dropped without also dropping the dependent object.\n #}\n\n {%- call statement('relations', fetch_result=True) -%}\n with relation as (\n select\n pg_rewrite.ev_class as class,\n pg_rewrite.oid as id\n from pg_rewrite\n ),\n class as (\n select\n oid as id,\n relname as name,\n relnamespace as schema,\n relkind as kind\n from pg_class\n ),\n dependency as (\n select distinct\n pg_depend.objid as id,\n pg_depend.refobjid as ref\n from pg_depend\n ),\n schema as (\n select\n pg_namespace.oid as id,\n pg_namespace.nspname as name\n from pg_namespace\n where nspname != 'information_schema' and nspname not like 'pg\\_%'\n ),\n referenced as (\n select\n relation.id AS id,\n referenced_class.name ,\n referenced_class.schema ,\n referenced_class.kind\n from relation\n join class as referenced_class on relation.class=referenced_class.id\n where referenced_class.kind in ('r', 'v')\n ),\n relationships as (\n select\n referenced.name as referenced_name,\n referenced.schema as referenced_schema_id,\n dependent_class.name as dependent_name,\n dependent_class.schema as dependent_schema_id,\n referenced.kind as kind\n from referenced\n join dependency on referenced.id=dependency.id\n join class as dependent_class on dependency.ref=dependent_class.id\n where\n (referenced.name != dependent_class.name or\n referenced.schema != dependent_class.schema)\n )\n\n select\n referenced_schema.name as referenced_schema,\n relationships.referenced_name as referenced_name,\n dependent_schema.name as dependent_schema,\n relationships.dependent_name as dependent_name\n from relationships\n join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id\n join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id\n group by referenced_schema, referenced_name, dependent_schema, dependent_name\n order by referenced_schema, referenced_name, dependent_schema, dependent_name;\n\n {%- endcall -%}\n\n {{ return(load_result('relations').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.833379, "supported_languages": null}, "macro.dbt_postgres.postgres__create_table_as": {"name": "postgres__create_table_as", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", 
"unique_id": "macro.dbt_postgres.postgres__create_table_as", "macro_sql": "{% macro postgres__create_table_as(temporary, relation, sql) -%}\n {%- set unlogged = config.get('unlogged', default=false) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary -%}\n temporary\n {%- elif unlogged -%}\n unlogged\n {%- endif %} table {{ relation }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.844162, "supported_languages": null}, "macro.dbt_postgres.postgres__get_create_index_sql": {"name": "postgres__get_create_index_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_create_index_sql", "macro_sql": "{% macro postgres__get_create_index_sql(relation, index_dict) -%}\n {%- set index_config = adapter.parse_index(index_dict) -%}\n {%- set comma_separated_columns = \", \".join(index_config.columns) -%}\n {%- set index_name = index_config.render(relation) -%}\n\n create {% if index_config.unique -%}\n unique\n {%- endif %} index if not exists\n \"{{ index_name }}\"\n on {{ relation }} {% if index_config.type -%}\n using {{ index_config.type }}\n {%- endif %}\n ({{ comma_separated_columns }});\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.84543, "supported_languages": null}, "macro.dbt_postgres.postgres__create_schema": {"name": "postgres__create_schema", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__create_schema", "macro_sql": "{% macro postgres__create_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier().include(database=False) }}\n {%- endcall -%}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.846217, "supported_languages": null}, "macro.dbt_postgres.postgres__drop_schema": {"name": "postgres__drop_schema", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__drop_schema", "macro_sql": "{% macro postgres__drop_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier().include(database=False) }} cascade\n {%- endcall -%}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.847004, "supported_languages": null}, "macro.dbt_postgres.postgres__get_columns_in_relation": {"name": "postgres__get_columns_in_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", 
"original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_columns_in_relation", "macro_sql": "{% macro postgres__get_columns_in_relation(relation) -%}\n {% call statement('get_columns_in_relation', fetch_result=True) %}\n select\n column_name,\n data_type,\n character_maximum_length,\n numeric_precision,\n numeric_scale\n\n from {{ relation.information_schema('columns') }}\n where table_name = '{{ relation.identifier }}'\n {% if relation.schema %}\n and table_schema = '{{ relation.schema }}'\n {% endif %}\n order by ordinal_position\n\n {% endcall %}\n {% set table = load_result('get_columns_in_relation').table %}\n {{ return(sql_convert_columns_in_relation(table)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.sql_convert_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8481832, "supported_languages": null}, "macro.dbt_postgres.postgres__list_relations_without_caching": {"name": "postgres__list_relations_without_caching", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__list_relations_without_caching", "macro_sql": "{% macro postgres__list_relations_without_caching(schema_relation) %}\n {% call statement('list_relations_without_caching', fetch_result=True) -%}\n select\n '{{ schema_relation.database }}' as database,\n tablename as name,\n schemaname as schema,\n 'table' as type\n from pg_tables\n where schemaname ilike '{{ schema_relation.schema }}'\n union all\n select\n '{{ schema_relation.database }}' as database,\n viewname as name,\n schemaname as schema,\n 'view' as type\n from pg_views\n where schemaname ilike '{{ schema_relation.schema }}'\n {% endcall %}\n {{ return(load_result('list_relations_without_caching').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8492, "supported_languages": null}, "macro.dbt_postgres.postgres__information_schema_name": {"name": "postgres__information_schema_name", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__information_schema_name", "macro_sql": "{% macro postgres__information_schema_name(database) -%}\n {% if database_name -%}\n {{ adapter.verify_database(database_name) }}\n {%- endif -%}\n information_schema\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.849643, "supported_languages": null}, "macro.dbt_postgres.postgres__list_schemas": {"name": "postgres__list_schemas", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__list_schemas", "macro_sql": "{% macro postgres__list_schemas(database) %}\n {% if database -%}\n {{ adapter.verify_database(database) }}\n {%- endif -%}\n {% call statement('list_schemas', fetch_result=True, auto_begin=False) %}\n select distinct nspname from pg_namespace\n {% endcall %}\n {{ return(load_result('list_schemas').table) }}\n{% endmacro %}", "depends_on": 
{"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8505101, "supported_languages": null}, "macro.dbt_postgres.postgres__check_schema_exists": {"name": "postgres__check_schema_exists", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__check_schema_exists", "macro_sql": "{% macro postgres__check_schema_exists(information_schema, schema) -%}\n {% if information_schema.database -%}\n {{ adapter.verify_database(information_schema.database) }}\n {%- endif -%}\n {% call statement('check_schema_exists', fetch_result=True, auto_begin=False) %}\n select count(*) from pg_namespace where nspname = '{{ schema }}'\n {% endcall %}\n {{ return(load_result('check_schema_exists').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.851476, "supported_languages": null}, "macro.dbt_postgres.postgres__make_relation_with_suffix": {"name": "postgres__make_relation_with_suffix", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_relation_with_suffix", "macro_sql": "{% macro postgres__make_relation_with_suffix(base_relation, suffix, dstring) %}\n {% if dstring %}\n {% set dt = modules.datetime.datetime.now() %}\n {% set dtstring = dt.strftime(\"%H%M%S%f\") %}\n {% set suffix = suffix ~ dtstring %}\n {% endif %}\n {% set suffix_length = suffix|length %}\n {% set relation_max_name_length = base_relation.relation_max_name_length() %}\n {% if suffix_length > relation_max_name_length %}\n {% do exceptions.raise_compiler_error('Relation suffix is too long (' ~ suffix_length ~ ' characters). 
Maximum length is ' ~ relation_max_name_length ~ ' characters.') %}\n {% endif %}\n {% set identifier = base_relation.identifier[:relation_max_name_length - suffix_length] ~ suffix %}\n\n {{ return(base_relation.incorporate(path={\"identifier\": identifier })) }}\n\n {% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.853593, "supported_languages": null}, "macro.dbt_postgres.postgres__make_intermediate_relation": {"name": "postgres__make_intermediate_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_intermediate_relation", "macro_sql": "{% macro postgres__make_intermediate_relation(base_relation, suffix) %}\n {{ return(postgres__make_relation_with_suffix(base_relation, suffix, dstring=False)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.854094, "supported_languages": null}, "macro.dbt_postgres.postgres__make_temp_relation": {"name": "postgres__make_temp_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_temp_relation", "macro_sql": "{% macro postgres__make_temp_relation(base_relation, suffix) %}\n {% set temp_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=True) %}\n {{ return(temp_relation.incorporate(path={\"schema\": none,\n \"database\": none})) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.855048, "supported_languages": null}, "macro.dbt_postgres.postgres__make_backup_relation": {"name": "postgres__make_backup_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_backup_relation", "macro_sql": "{% macro postgres__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {% set backup_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=False) %}\n {{ return(backup_relation.incorporate(type=backup_relation_type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.855862, "supported_languages": null}, "macro.dbt_postgres.postgres_escape_comment": {"name": "postgres_escape_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres_escape_comment", "macro_sql": "{% macro postgres_escape_comment(comment) -%}\n {% if comment is not string %}\n {% do exceptions.raise_compiler_error('cannot escape a non-string: ' ~ comment) %}\n {% endif %}\n {%- set magic = '$dbt_comment_literal_block$' -%}\n {%- if magic in comment -%}\n {%- do exceptions.raise_compiler_error('The string ' ~ 
magic ~ ' is not allowed in comments.') -%}\n {%- endif -%}\n {{ magic }}{{ comment }}{{ magic }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.856958, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_relation_comment": {"name": "postgres__alter_relation_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__alter_relation_comment", "macro_sql": "{% macro postgres__alter_relation_comment(relation, comment) %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on {{ relation.type }} {{ relation }} is {{ escaped_comment }};\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8575392, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_column_comment": {"name": "postgres__alter_column_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__alter_column_comment", "macro_sql": "{% macro postgres__alter_column_comment(relation, column_dict) %}\n {% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute=\"name\") | list %}\n {% for column_name in column_dict if (column_name in existing_columns) %}\n {% set comment = column_dict[column_name]['description'] %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on column {{ relation }}.{{ adapter.quote(column_name) if column_dict[column_name]['quote'] else column_name }} is {{ escaped_comment }};\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8590431, "supported_languages": null}, "macro.dbt_postgres.postgres__get_show_grant_sql": {"name": "postgres__get_show_grant_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_show_grant_sql", "macro_sql": "\n\n{%- macro postgres__get_show_grant_sql(relation) -%}\n select grantee, privilege_type\n from {{ relation.information_schema('role_table_grants') }}\n where grantor = current_role\n and grantee != current_role\n and table_schema = '{{ relation.schema }}'\n and table_name = '{{ relation.identifier }}'\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8596349, "supported_languages": null}, "macro.dbt_postgres.postgres__copy_grants": {"name": "postgres__copy_grants", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__copy_grants", "macro_sql": "{% macro postgres__copy_grants() %}\n {{ return(False) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853277.85995, "supported_languages": null}, "macro.dbt_postgres.postgres__get_incremental_default_sql": {"name": "postgres__get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/materializations/incremental_strategies.sql", "original_file_path": "macros/materializations/incremental_strategies.sql", "unique_id": "macro.dbt_postgres.postgres__get_incremental_default_sql", "macro_sql": "{% macro postgres__get_incremental_default_sql(arg_dict) %}\n\n {% if arg_dict[\"unique_key\"] %}\n {% do return(get_incremental_delete_insert_sql(arg_dict)) %}\n {% else %}\n {% do return(get_incremental_append_sql(arg_dict)) %}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_incremental_delete_insert_sql", "macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.860895, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_merge_sql": {"name": "postgres__snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/materializations/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshot_merge.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_merge_sql", "macro_sql": "{% macro postgres__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n update {{ target }}\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_scd_id::text = {{ target }}.dbt_scd_id::text\n and DBT_INTERNAL_SOURCE.dbt_change_type::text in ('update'::text, 'delete'::text)\n and {{ target }}.dbt_valid_to is null;\n\n insert into {{ target }} ({{ insert_cols_csv }})\n select {% for column in insert_cols -%}\n DBT_INTERNAL_SOURCE.{{ column }} {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_change_type::text = 'insert'::text;\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8624861, "supported_languages": null}, "macro.dbt_postgres.postgres__dateadd": {"name": "postgres__dateadd", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt_postgres.postgres__dateadd", "macro_sql": "{% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.863015, "supported_languages": null}, "macro.dbt_postgres.postgres__listagg": {"name": "postgres__listagg", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt_postgres.postgres__listagg", "macro_sql": "{% macro postgres__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n (array_agg(\n {{ measure }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n ))[1:{{ limit_num }}],\n {{ 
delimiter_text }}\n )\n {%- else %}\n string_agg(\n {{ measure }},\n {{ delimiter_text }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n )\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.864436, "supported_languages": null}, "macro.dbt_postgres.postgres__datediff": {"name": "postgres__datediff", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt_postgres.postgres__datediff", "macro_sql": "{% macro postgres__datediff(first_date, second_date, datepart) -%}\n\n {% if datepart == 'year' %}\n (date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))\n {% elif datepart == 'quarter' %}\n ({{ datediff(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))\n {% elif datepart == 'month' %}\n ({{ datediff(first_date, second_date, 'year') }} * 12 + date_part('month', ({{second_date}})::date) - date_part('month', ({{first_date}})::date))\n {% elif datepart == 'day' %}\n (({{second_date}})::date - ({{first_date}})::date)\n {% elif datepart == 'week' %}\n ({{ datediff(first_date, second_date, 'day') }} / 7 + case\n when date_part('dow', ({{first_date}})::timestamp) <= date_part('dow', ({{second_date}})::timestamp) then\n case when {{first_date}} <= {{second_date}} then 0 else -1 end\n else\n case when {{first_date}} <= {{second_date}} then 1 else 0 end\n end)\n {% elif datepart == 'hour' %}\n ({{ datediff(first_date, second_date, 'day') }} * 24 + date_part('hour', ({{second_date}})::timestamp) - date_part('hour', ({{first_date}})::timestamp))\n {% elif datepart == 'minute' %}\n ({{ datediff(first_date, second_date, 'hour') }} * 60 + date_part('minute', ({{second_date}})::timestamp) - date_part('minute', ({{first_date}})::timestamp))\n {% elif datepart == 'second' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60 + floor(date_part('second', ({{second_date}})::timestamp)) - floor(date_part('second', ({{first_date}})::timestamp)))\n {% elif datepart == 'millisecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000 + floor(date_part('millisecond', ({{second_date}})::timestamp)) - floor(date_part('millisecond', ({{first_date}})::timestamp)))\n {% elif datepart == 'microsecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000000 + floor(date_part('microsecond', ({{second_date}})::timestamp)) - floor(date_part('microsecond', ({{first_date}})::timestamp)))\n {% else %}\n {{ exceptions.raise_compiler_error(\"Unsupported datepart for macro datediff in postgres: {!r}\".format(datepart)) }}\n {% endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.871541, "supported_languages": null}, "macro.dbt_postgres.postgres__any_value": {"name": "postgres__any_value", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt_postgres.postgres__any_value", "macro_sql": "{% macro postgres__any_value(expression) -%}\n\n min({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", 
"meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8719308, "supported_languages": null}, "macro.dbt_postgres.postgres__last_day": {"name": "postgres__last_day", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt_postgres.postgres__last_day", "macro_sql": "{% macro postgres__last_day(date, datepart) -%}\n\n {%- if datepart == 'quarter' -%}\n -- postgres dateadd does not support quarter interval.\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd('month', '3', dbt.date_trunc(datepart, date))\n )}}\n as date)\n {%- else -%}\n {{dbt.default_last_day(date, datepart)}}\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc", "macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8731148, "supported_languages": null}, "macro.dbt_postgres.postgres__split_part": {"name": "postgres__split_part", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt_postgres.postgres__split_part", "macro_sql": "{% macro postgres__split_part(string_text, delimiter_text, part_number) %}\n\n {% if part_number >= 0 %}\n {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}\n {% else %}\n {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__split_part", "macro.dbt._split_part_negative"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8740962, "supported_languages": null}, "macro.dbt.run_hooks": {"name": "run_hooks", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.run_hooks", "macro_sql": "{% macro run_hooks(hooks, inside_transaction=True) %}\n {% for hook in hooks | selectattr('transaction', 'equalto', inside_transaction) %}\n {% if not inside_transaction and loop.first %}\n {% call statement(auto_begin=inside_transaction) %}\n commit;\n {% endcall %}\n {% endif %}\n {% set rendered = render(hook.get('sql')) | trim %}\n {% if (rendered | length) > 0 %}\n {% call statement(auto_begin=inside_transaction) %}\n {{ rendered }}\n {% endcall %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.876752, "supported_languages": null}, "macro.dbt.make_hook_config": {"name": "make_hook_config", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.make_hook_config", "macro_sql": "{% macro make_hook_config(sql, inside_transaction) %}\n {{ tojson({\"sql\": sql, \"transaction\": inside_transaction}) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8772988, "supported_languages": null}, 
"macro.dbt.before_begin": {"name": "before_begin", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.before_begin", "macro_sql": "{% macro before_begin(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.877683, "supported_languages": null}, "macro.dbt.in_transaction": {"name": "in_transaction", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.in_transaction", "macro_sql": "{% macro in_transaction(sql) %}\n {{ make_hook_config(sql, inside_transaction=True) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.878058, "supported_languages": null}, "macro.dbt.after_commit": {"name": "after_commit", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.after_commit", "macro_sql": "{% macro after_commit(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.878428, "supported_languages": null}, "macro.dbt.set_sql_header": {"name": "set_sql_header", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.set_sql_header", "macro_sql": "{% macro set_sql_header(config) -%}\n {{ config.set('sql_header', caller()) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8792682, "supported_languages": null}, "macro.dbt.should_full_refresh": {"name": "should_full_refresh", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.should_full_refresh", "macro_sql": "{% macro should_full_refresh() %}\n {% set config_full_refresh = config.get('full_refresh') %}\n {% if config_full_refresh is none %}\n {% set config_full_refresh = flags.FULL_REFRESH %}\n {% endif %}\n {% do return(config_full_refresh) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.880049, "supported_languages": null}, "macro.dbt.should_store_failures": {"name": "should_store_failures", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.should_store_failures", "macro_sql": "{% macro should_store_failures() %}\n {% set config_store_failures = config.get('store_failures') %}\n {% if config_store_failures is none %}\n {% set 
config_store_failures = flags.STORE_FAILURES %}\n {% endif %}\n {% do return(config_store_failures) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.880833, "supported_languages": null}, "macro.dbt.snapshot_merge_sql": {"name": "snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "unique_id": "macro.dbt.snapshot_merge_sql", "macro_sql": "{% macro snapshot_merge_sql(target, source, insert_cols) -%}\n {{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.881776, "supported_languages": null}, "macro.dbt.default__snapshot_merge_sql": {"name": "default__snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "unique_id": "macro.dbt.default__snapshot_merge_sql", "macro_sql": "{% macro default__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id\n\n when matched\n and DBT_INTERNAL_DEST.dbt_valid_to is null\n and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')\n then update\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n\n when not matched\n and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'\n then insert ({{ insert_cols_csv }})\n values ({{ insert_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.882448, "supported_languages": null}, "macro.dbt.strategy_dispatch": {"name": "strategy_dispatch", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.strategy_dispatch", "macro_sql": "{% macro strategy_dispatch(name) -%}\n{% set original_name = name %}\n {% if '.' 
in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called with '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n\n {%- set search_name = 'snapshot_' ~ name ~ '_strategy' -%}\n\n {% if search_name not in package_context %}\n {% set error_msg %}\n The specified strategy macro '{{name}}' was not found in package '{{ package_name }}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n {{ return(package_context[search_name]) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.88957, "supported_languages": null}, "macro.dbt.snapshot_hash_arguments": {"name": "snapshot_hash_arguments", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_hash_arguments", "macro_sql": "{% macro snapshot_hash_arguments(args) -%}\n {{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.89017, "supported_languages": null}, "macro.dbt.default__snapshot_hash_arguments": {"name": "default__snapshot_hash_arguments", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.default__snapshot_hash_arguments", "macro_sql": "{% macro default__snapshot_hash_arguments(args) -%}\n md5({%- for arg in args -%}\n coalesce(cast({{ arg }} as varchar ), '')\n {% if not loop.last %} || '|' || {% endif %}\n {%- endfor -%})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.890751, "supported_languages": null}, "macro.dbt.snapshot_timestamp_strategy": {"name": "snapshot_timestamp_strategy", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_timestamp_strategy", "macro_sql": "{% macro snapshot_timestamp_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set primary_key = config['unique_key'] %}\n {% set updated_at = config['updated_at'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n\n {#/*\n The snapshot relation might not have an {{ updated_at }} value if the\n snapshot strategy is changed from `check` to `timestamp`. 
We\n should use a dbt-created column for the comparison in the snapshot\n table instead of assuming that the user-supplied {{ updated_at }}\n will be present in the historical data.\n\n See https://github.com/dbt-labs/dbt-core/issues/2350\n */ #}\n {% set row_changed_expr -%}\n ({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.892682, "supported_languages": null}, "macro.dbt.snapshot_string_as_time": {"name": "snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_string_as_time", "macro_sql": "{% macro snapshot_string_as_time(timestamp) -%}\n {{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_string_as_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.893109, "supported_languages": null}, "macro.dbt.default__snapshot_string_as_time": {"name": "default__snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.default__snapshot_string_as_time", "macro_sql": "{% macro default__snapshot_string_as_time(timestamp) %}\n {% do exceptions.raise_not_implemented(\n 'snapshot_string_as_time macro not implemented for adapter '+adapter.type()\n ) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.893554, "supported_languages": null}, "macro.dbt.snapshot_check_all_get_existing_columns": {"name": "snapshot_check_all_get_existing_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_check_all_get_existing_columns", "macro_sql": "{% macro snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) -%}\n {%- if not target_exists -%}\n {#-- no table yet -> return whatever the query does --#}\n {{ return((false, query_columns)) }}\n {%- endif -%}\n\n {#-- handle any schema changes --#}\n {%- set target_relation = adapter.get_relation(database=node.database, schema=node.schema, identifier=node.alias) -%}\n\n {% if check_cols_config == 'all' %}\n {%- set query_columns = get_columns_in_query(node['compiled_code']) -%}\n\n {% elif check_cols_config is iterable and (check_cols_config | length) > 0 %}\n {#-- query for proper casing/quoting, to support comparison below --#}\n {%- set select_check_cols_from_target -%}\n select {{ check_cols_config | join(', ') }} from ({{ node['compiled_code'] }}) subq\n {%- 
endset -%}\n {% set query_columns = get_columns_in_query(select_check_cols_from_target) %}\n\n {% else %}\n {% do exceptions.raise_compiler_error(\"Invalid value for 'check_cols': \" ~ check_cols_config) %}\n {% endif %}\n\n {%- set existing_cols = adapter.get_columns_in_relation(target_relation) | map(attribute = 'name') | list -%}\n {%- set ns = namespace() -%} {#-- handle for-loop scoping with a namespace --#}\n {%- set ns.column_added = false -%}\n\n {%- set intersection = [] -%}\n {%- for col in query_columns -%}\n {%- if col in existing_cols -%}\n {%- do intersection.append(adapter.quote(col)) -%}\n {%- else -%}\n {% set ns.column_added = true %}\n {%- endif -%}\n {%- endfor -%}\n {{ return((ns.column_added, intersection)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.897105, "supported_languages": null}, "macro.dbt.snapshot_check_strategy": {"name": "snapshot_check_strategy", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_check_strategy", "macro_sql": "{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set check_cols_config = config['check_cols'] %}\n {% set primary_key = config['unique_key'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n {% set updated_at = config.get('updated_at', snapshot_get_time()) %}\n\n {% set column_added = false %}\n\n {% set column_added, check_cols = snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) %}\n\n {%- set row_changed_expr -%}\n (\n {%- if column_added -%}\n {{ get_true_sql() }}\n {%- else -%}\n {%- for col in check_cols -%}\n {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }}\n or\n (\n (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ current_rel }}.{{ col }} is null))\n or\n ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null))\n )\n {%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_get_time", "macro.dbt.snapshot_check_all_get_existing_columns", "macro.dbt.get_true_sql", "macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.90045, "supported_languages": null}, "macro.dbt.create_columns": {"name": "create_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.create_columns", "macro_sql": "{% macro create_columns(relation, columns) %}\n {{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853277.9079978, "supported_languages": null}, "macro.dbt.default__create_columns": {"name": "default__create_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__create_columns", "macro_sql": "{% macro default__create_columns(relation, columns) %}\n {% for column in columns %}\n {% call statement() %}\n alter table {{ relation }} add column \"{{ column.name }}\" {{ column.data_type }};\n {% endcall %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.908781, "supported_languages": null}, "macro.dbt.post_snapshot": {"name": "post_snapshot", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.post_snapshot", "macro_sql": "{% macro post_snapshot(staging_relation) %}\n {{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.909224, "supported_languages": null}, "macro.dbt.default__post_snapshot": {"name": "default__post_snapshot", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__post_snapshot", "macro_sql": "{% macro default__post_snapshot(staging_relation) %}\n {# no-op #}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.909463, "supported_languages": null}, "macro.dbt.get_true_sql": {"name": "get_true_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.get_true_sql", "macro_sql": "{% macro get_true_sql() %}\n {{ adapter.dispatch('get_true_sql', 'dbt')() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_true_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9098508, "supported_languages": null}, "macro.dbt.default__get_true_sql": {"name": "default__get_true_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__get_true_sql", "macro_sql": "{% macro default__get_true_sql() %}\n {{ return('TRUE') }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9101608, "supported_languages": null}, "macro.dbt.snapshot_staging_table": {"name": "snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": 
"macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.snapshot_staging_table", "macro_sql": "{% macro snapshot_staging_table(strategy, source_sql, target_relation) -%}\n {{ adapter.dispatch('snapshot_staging_table', 'dbt')(strategy, source_sql, target_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__snapshot_staging_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.910683, "supported_languages": null}, "macro.dbt.default__snapshot_staging_table": {"name": "default__snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__snapshot_staging_table", "macro_sql": "{% macro default__snapshot_staging_table(strategy, source_sql, target_relation) -%}\n\n with snapshot_query as (\n\n {{ source_sql }}\n\n ),\n\n snapshotted_data as (\n\n select *,\n {{ strategy.unique_key }} as dbt_unique_key\n\n from {{ target_relation }}\n where dbt_valid_to is null\n\n ),\n\n insertions_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to,\n {{ strategy.scd_id }} as dbt_scd_id\n\n from snapshot_query\n ),\n\n updates_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n {{ strategy.updated_at }} as dbt_valid_to\n\n from snapshot_query\n ),\n\n {%- if strategy.invalidate_hard_deletes %}\n\n deletes_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key\n from snapshot_query\n ),\n {% endif %}\n\n insertions as (\n\n select\n 'insert' as dbt_change_type,\n source_data.*\n\n from insertions_source_data as source_data\n left outer join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where snapshotted_data.dbt_unique_key is null\n or (\n snapshotted_data.dbt_unique_key is not null\n and (\n {{ strategy.row_changed }}\n )\n )\n\n ),\n\n updates as (\n\n select\n 'update' as dbt_change_type,\n source_data.*,\n snapshotted_data.dbt_scd_id\n\n from updates_source_data as source_data\n join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where (\n {{ strategy.row_changed }}\n )\n )\n\n {%- if strategy.invalidate_hard_deletes -%}\n ,\n\n deletes as (\n\n select\n 'delete' as dbt_change_type,\n source_data.*,\n {{ snapshot_get_time() }} as dbt_valid_from,\n {{ snapshot_get_time() }} as dbt_updated_at,\n {{ snapshot_get_time() }} as dbt_valid_to,\n snapshotted_data.dbt_scd_id\n\n from snapshotted_data\n left join deletes_source_data as source_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where source_data.dbt_unique_key is null\n )\n {%- endif %}\n\n select * from insertions\n union all\n select * from updates\n {%- if strategy.invalidate_hard_deletes %}\n union all\n select * from deletes\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.912821, "supported_languages": null}, 
"macro.dbt.build_snapshot_table": {"name": "build_snapshot_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.build_snapshot_table", "macro_sql": "{% macro build_snapshot_table(strategy, sql) -%}\n {{ adapter.dispatch('build_snapshot_table', 'dbt')(strategy, sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__build_snapshot_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.913307, "supported_languages": null}, "macro.dbt.default__build_snapshot_table": {"name": "default__build_snapshot_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__build_snapshot_table", "macro_sql": "{% macro default__build_snapshot_table(strategy, sql) %}\n\n select *,\n {{ strategy.scd_id }} as dbt_scd_id,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to\n from (\n {{ sql }}\n ) sbq\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.913965, "supported_languages": null}, "macro.dbt.build_snapshot_staging_table": {"name": "build_snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.build_snapshot_staging_table", "macro_sql": "{% macro build_snapshot_staging_table(strategy, sql, target_relation) %}\n {% set temp_relation = make_temp_relation(target_relation) %}\n\n {% set select = snapshot_staging_table(strategy, sql, target_relation) %}\n\n {% call statement('build_snapshot_staging_relation') %}\n {{ create_table_as(True, temp_relation, select) }}\n {% endcall %}\n\n {% do return(temp_relation) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_temp_relation", "macro.dbt.snapshot_staging_table", "macro.dbt.statement", "macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.915048, "supported_languages": null}, "macro.dbt.materialization_snapshot_default": {"name": "materialization_snapshot_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot.sql", "original_file_path": "macros/materializations/snapshots/snapshot.sql", "unique_id": "macro.dbt.materialization_snapshot_default", "macro_sql": "{% materialization snapshot, default %}\n {%- set config = model['config'] -%}\n\n {%- set target_table = model.get('alias', model.get('name')) -%}\n\n {%- set strategy_name = config.get('strategy') -%}\n {%- set unique_key = config.get('unique_key') %}\n -- grab current tables grants config for comparision later on\n {%- set grant_config = config.get('grants') -%}\n\n {% set target_relation_exists, target_relation = get_or_create_relation(\n database=model.database,\n schema=model.schema,\n identifier=target_table,\n type='table') -%}\n\n {%- if not target_relation.is_table -%}\n {% do 
exceptions.relation_wrong_type(target_relation, 'table') %}\n {%- endif -%}\n\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set strategy_macro = strategy_dispatch(strategy_name) %}\n {% set strategy = strategy_macro(model, \"snapshotted_data\", \"source_data\", config, target_relation_exists) %}\n\n {% if not target_relation_exists %}\n\n {% set build_sql = build_snapshot_table(strategy, model['compiled_code']) %}\n {% set final_sql = create_table_as(False, target_relation, build_sql) %}\n\n {% else %}\n\n {{ adapter.valid_snapshot_target(target_relation) }}\n\n {% set staging_table = build_snapshot_staging_table(strategy, sql, target_relation) %}\n\n -- this may no-op if the database does not require column expansion\n {% do adapter.expand_target_column_types(from_relation=staging_table,\n to_relation=target_relation) %}\n\n {% set missing_columns = adapter.get_missing_columns(staging_table, target_relation)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% do create_columns(target_relation, missing_columns) %}\n\n {% set source_columns = adapter.get_columns_in_relation(staging_table)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% set quoted_source_columns = [] %}\n {% for column in source_columns %}\n {% do quoted_source_columns.append(adapter.quote(column.name)) %}\n {% endfor %}\n\n {% set final_sql = snapshot_merge_sql(\n target = target_relation,\n source = staging_table,\n insert_cols = quoted_source_columns\n )\n %}\n\n {% endif %}\n\n {% call statement('main') %}\n {{ final_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(target_relation_exists, full_refresh_mode=False) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if not target_relation_exists %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {% if staging_table is defined %}\n {% do post_snapshot(staging_table) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.get_or_create_relation", "macro.dbt.run_hooks", "macro.dbt.strategy_dispatch", "macro.dbt.build_snapshot_table", "macro.dbt.create_table_as", "macro.dbt.build_snapshot_staging_table", "macro.dbt.create_columns", "macro.dbt.snapshot_merge_sql", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes", "macro.dbt.post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.928437, "supported_languages": ["sql"]}, "macro.dbt.materialization_test_default": {"name": "materialization_test_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/test.sql", "original_file_path": "macros/materializations/tests/test.sql", "unique_id": "macro.dbt.materialization_test_default", "macro_sql": "{%- 
materialization test, default -%}\n\n {% set relations = [] %}\n\n {% if should_store_failures() %}\n\n {% set identifier = model['alias'] %}\n {% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database, type='table') -%} %}\n\n {% if old_relation %}\n {% do adapter.drop_relation(old_relation) %}\n {% endif %}\n\n {% call statement(auto_begin=True) %}\n {{ create_table_as(False, target_relation, sql) }}\n {% endcall %}\n\n {% do relations.append(target_relation) %}\n\n {% set main_sql %}\n select *\n from {{ target_relation }}\n {% endset %}\n\n {{ adapter.commit() }}\n\n {% else %}\n\n {% set main_sql = sql %}\n\n {% endif %}\n\n {% set limit = config.get('limit') %}\n {% set fail_calc = config.get('fail_calc') %}\n {% set warn_if = config.get('warn_if') %}\n {% set error_if = config.get('error_if') %}\n\n {% call statement('main', fetch_result=True) -%}\n\n {{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}\n\n {%- endcall %}\n\n {{ return({'relations': relations}) }}\n\n{%- endmaterialization -%}", "depends_on": {"macros": ["macro.dbt.should_store_failures", "macro.dbt.statement", "macro.dbt.create_table_as", "macro.dbt.get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.933413, "supported_languages": ["sql"]}, "macro.dbt.get_test_sql": {"name": "get_test_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "unique_id": "macro.dbt.get_test_sql", "macro_sql": "{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n {{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.934442, "supported_languages": null}, "macro.dbt.default__get_test_sql": {"name": "default__get_test_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "unique_id": "macro.dbt.default__get_test_sql", "macro_sql": "{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n select\n {{ fail_calc }} as failures,\n {{ fail_calc }} {{ warn_if }} as should_warn,\n {{ fail_calc }} {{ error_if }} as should_error\n from (\n {{ main_sql }}\n {{ \"limit \" ~ limit if limit != none }}\n ) dbt_internal_test\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.93521, "supported_languages": null}, "macro.dbt.get_where_subquery": {"name": "get_where_subquery", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "unique_id": "macro.dbt.get_where_subquery", "macro_sql": "{% macro get_where_subquery(relation) -%}\n {% do return(adapter.dispatch('get_where_subquery', 'dbt')(relation)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_where_subquery"]}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9360561, "supported_languages": null}, "macro.dbt.default__get_where_subquery": {"name": "default__get_where_subquery", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "unique_id": "macro.dbt.default__get_where_subquery", "macro_sql": "{% macro default__get_where_subquery(relation) -%}\n {% set where = config.get('where', '') %}\n {% if where %}\n {%- set filtered -%}\n (select * from {{ relation }} where {{ where }}) dbt_subquery\n {%- endset -%}\n {% do return(filtered) %}\n {%- else -%}\n {% do return(relation) %}\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9370618, "supported_languages": null}, "macro.dbt.get_quoted_csv": {"name": "get_quoted_csv", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.get_quoted_csv", "macro_sql": "{% macro get_quoted_csv(column_names) %}\n\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote(col)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.940309, "supported_languages": null}, "macro.dbt.diff_columns": {"name": "diff_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.diff_columns", "macro_sql": "{% macro diff_columns(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% set source_names = source_columns | map(attribute = 'column') | list %}\n {% set target_names = target_columns | map(attribute = 'column') | list %}\n\n {# --check whether the name attribute exists in the target - this does not perform a data type check #}\n {% for sc in source_columns %}\n {% if sc.name not in target_names %}\n {{ result.append(sc) }}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9417028, "supported_languages": null}, "macro.dbt.diff_column_data_types": {"name": "diff_column_data_types", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.diff_column_data_types", "macro_sql": "{% macro diff_column_data_types(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% for sc in source_columns %}\n {% set tc = target_columns | selectattr(\"name\", \"equalto\", sc.name) | list | first %}\n {% if tc %}\n {% if sc.data_type != tc.data_type and not sc.can_expand_to(other_column=tc) %}\n {{ result.append( { 'column_name': 
tc.name, 'new_type': sc.data_type } ) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.943423, "supported_languages": null}, "macro.dbt.get_merge_update_columns": {"name": "get_merge_update_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.get_merge_update_columns", "macro_sql": "{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.944038, "supported_languages": null}, "macro.dbt.default__get_merge_update_columns": {"name": "default__get_merge_update_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.default__get_merge_update_columns", "macro_sql": "{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {%- set default_cols = dest_columns | map(attribute=\"quoted\") | list -%}\n\n {%- if merge_update_columns and merge_exclude_columns -%}\n {{ exceptions.raise_compiler_error(\n 'Model cannot specify merge_update_columns and merge_exclude_columns. 
Please update model to use only one config'\n )}}\n {%- elif merge_update_columns -%}\n {%- set update_columns = merge_update_columns -%}\n {%- elif merge_exclude_columns -%}\n {%- set update_columns = [] -%}\n {%- for column in dest_columns -%}\n {% if column.column | lower not in merge_exclude_columns | map(\"lower\") | list %}\n {%- do update_columns.append(column.quoted) -%}\n {% endif %}\n {%- endfor -%}\n {%- else -%}\n {%- set update_columns = default_cols -%}\n {%- endif -%}\n\n {{ return(update_columns) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9458601, "supported_languages": null}, "macro.dbt.get_merge_sql": {"name": "get_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_merge_sql", "macro_sql": "{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}\n {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.954162, "supported_languages": null}, "macro.dbt.default__get_merge_sql": {"name": "default__get_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_merge_sql", "macro_sql": "{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set merge_update_columns = config.get('merge_update_columns') -%}\n {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}\n {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}\n {% for key in unique_key %}\n {% set this_key_match %}\n DBT_INTERNAL_SOURCE.{{ key }} = DBT_INTERNAL_DEST.{{ key }}\n {% endset %}\n {% do predicates.append(this_key_match) %}\n {% endfor %}\n {% else %}\n {% set unique_key_match %}\n DBT_INTERNAL_SOURCE.{{ unique_key }} = DBT_INTERNAL_DEST.{{ unique_key }}\n {% endset %}\n {% do predicates.append(unique_key_match) %}\n {% endif %}\n {% else %}\n {% do predicates.append('FALSE') %}\n {% endif %}\n\n {{ sql_header if sql_header is not none }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on {{ predicates | join(' and ') }}\n\n {% if unique_key %}\n when matched then update set\n {% for column_name in update_columns -%}\n {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}\n {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n {% endif %}\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv", 
"macro.dbt.get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.958357, "supported_languages": null}, "macro.dbt.get_delete_insert_merge_sql": {"name": "get_delete_insert_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_delete_insert_merge_sql", "macro_sql": "{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.959069, "supported_languages": null}, "macro.dbt.default__get_delete_insert_merge_sql": {"name": "default__get_delete_insert_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_delete_insert_merge_sql", "macro_sql": "{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not string %}\n delete from {{target }}\n using {{ source }}\n where (\n {% for key in unique_key %}\n {{ source }}.{{ key }} = {{ target }}.{{ key }}\n {{ \"and \" if not loop.last }}\n {% endfor %}\n );\n {% else %}\n delete from {{ target }}\n where (\n {{ unique_key }}) in (\n select ({{ unique_key }})\n from {{ source }}\n );\n\n {% endif %}\n {% endif %}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ source }}\n )\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9617019, "supported_languages": null}, "macro.dbt.get_insert_overwrite_merge_sql": {"name": "get_insert_overwrite_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_insert_overwrite_merge_sql", "macro_sql": "{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}\n {{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9624152, "supported_languages": null}, "macro.dbt.default__get_insert_overwrite_merge_sql": {"name": "default__get_insert_overwrite_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": 
"macro.dbt.default__get_insert_overwrite_merge_sql", "macro_sql": "{% macro default__get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header) -%}\n {#-- The only time include_sql_header is True: --#}\n {#-- BigQuery + insert_overwrite strategy + \"static\" partitions config --#}\n {#-- We should consider including the sql header at the materialization level instead --#}\n\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none and include_sql_header }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on FALSE\n\n when not matched by source\n {% if predicates %} and {{ predicates | join(' and ') }} {% endif %}\n then delete\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.964027, "supported_languages": null}, "macro.dbt.is_incremental": {"name": "is_incremental", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/is_incremental.sql", "original_file_path": "macros/materializations/models/incremental/is_incremental.sql", "unique_id": "macro.dbt.is_incremental", "macro_sql": "{% macro is_incremental() %}\n {#-- do not run introspective queries in parsing #}\n {% if not execute %}\n {{ return(False) }}\n {% else %}\n {% set relation = adapter.get_relation(this.database, this.schema, this.table) %}\n {{ return(relation is not none\n and relation.type == 'table'\n and model.config.materialized == 'incremental'\n and not should_full_refresh()) }}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.should_full_refresh"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9655309, "supported_languages": null}, "macro.dbt.get_incremental_append_sql": {"name": "get_incremental_append_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_append_sql", "macro_sql": "{% macro get_incremental_append_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_append_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.967351, "supported_languages": null}, "macro.dbt.default__get_incremental_append_sql": {"name": "default__get_incremental_append_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_append_sql", "macro_sql": "{% macro default__get_incremental_append_sql(arg_dict) %}\n\n {% do return(get_insert_into_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", 
"depends_on": {"macros": ["macro.dbt.get_insert_into_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.967958, "supported_languages": null}, "macro.dbt.get_incremental_delete_insert_sql": {"name": "get_incremental_delete_insert_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_delete_insert_sql", "macro_sql": "{% macro get_incremental_delete_insert_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_delete_insert_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_delete_insert_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9684448, "supported_languages": null}, "macro.dbt.default__get_incremental_delete_insert_sql": {"name": "default__get_incremental_delete_insert_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_delete_insert_sql", "macro_sql": "{% macro default__get_incremental_delete_insert_sql(arg_dict) %}\n\n {% do return(get_delete_insert_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.969121, "supported_languages": null}, "macro.dbt.get_incremental_merge_sql": {"name": "get_incremental_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_merge_sql", "macro_sql": "{% macro get_incremental_merge_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9696, "supported_languages": null}, "macro.dbt.default__get_incremental_merge_sql": {"name": "default__get_incremental_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_merge_sql", "macro_sql": "{% macro default__get_incremental_merge_sql(arg_dict) %}\n\n {% do return(get_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.970359, "supported_languages": null}, "macro.dbt.get_incremental_insert_overwrite_sql": {"name": 
"get_incremental_insert_overwrite_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_insert_overwrite_sql", "macro_sql": "{% macro get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_insert_overwrite_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_insert_overwrite_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.970861, "supported_languages": null}, "macro.dbt.default__get_incremental_insert_overwrite_sql": {"name": "default__get_incremental_insert_overwrite_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_insert_overwrite_sql", "macro_sql": "{% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {% do return(get_insert_overwrite_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"], arg_dict[\"predicates\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.971532, "supported_languages": null}, "macro.dbt.get_incremental_default_sql": {"name": "get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_default_sql", "macro_sql": "{% macro get_incremental_default_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_default_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_incremental_default_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9720068, "supported_languages": null}, "macro.dbt.default__get_incremental_default_sql": {"name": "default__get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_default_sql", "macro_sql": "{% macro default__get_incremental_default_sql(arg_dict) %}\n\n {% do return(get_incremental_append_sql(arg_dict)) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.972397, "supported_languages": null}, "macro.dbt.get_insert_into_sql": {"name": "get_insert_into_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_insert_into_sql", "macro_sql": "{% macro get_insert_into_sql(target_relation, 
temp_relation, dest_columns) %}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n insert into {{ target_relation }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ temp_relation }}\n )\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9731112, "supported_languages": null}, "macro.dbt.materialization_incremental_default": {"name": "materialization_incremental_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/incremental.sql", "original_file_path": "macros/materializations/models/incremental/incremental.sql", "unique_id": "macro.dbt.materialization_incremental_default", "macro_sql": "{% materialization incremental, default -%}\n\n -- relations\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') -%}\n {%- set temp_relation = make_temp_relation(target_relation)-%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation)-%}\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n\n -- configs\n {%- set unique_key = config.get('unique_key') -%}\n {%- set full_refresh_mode = (should_full_refresh() or existing_relation.is_view) -%}\n {%- set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') -%}\n\n -- the temp_ and backup_ relations should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation. This has to happen before\n -- BEGIN, in a separate transaction\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation)-%}\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set to_drop = [] %}\n\n {% if existing_relation is none %}\n {% set build_sql = get_create_table_as_sql(False, target_relation, sql) %}\n {% elif full_refresh_mode %}\n {% set build_sql = get_create_table_as_sql(False, intermediate_relation, sql) %}\n {% set need_swap = true %}\n {% else %}\n {% do run_query(get_create_table_as_sql(True, temp_relation, sql)) %}\n {% do adapter.expand_target_column_types(\n from_relation=temp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. 
Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, temp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set incremental_predicates = config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %}\n {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}\n\n {% endif %}\n\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% if need_swap %}\n {% do adapter.rename_relation(target_relation, backup_relation) %}\n {% do adapter.rename_relation(intermediate_relation, target_relation) %}\n {% do to_drop.append(backup_relation) %}\n {% endif %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {% do adapter.commit() %}\n\n {% for rel in to_drop %}\n {% do adapter.drop_relation(rel) %}\n {% endfor %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_temp_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.should_full_refresh", "macro.dbt.incremental_validate_on_schema_change", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.get_create_table_as_sql", "macro.dbt.run_query", "macro.dbt.process_schema_changes", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.984826, "supported_languages": ["sql"]}, "macro.dbt.incremental_validate_on_schema_change": {"name": "incremental_validate_on_schema_change", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.incremental_validate_on_schema_change", "macro_sql": "{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}\n\n {% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}\n\n {% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' 
% (on_schema_change, default) %}\n {% do log(log_message) %}\n\n {{ return(default) }}\n\n {% else %}\n\n {{ return(on_schema_change) }}\n\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.995018, "supported_languages": null}, "macro.dbt.check_for_schema_changes": {"name": "check_for_schema_changes", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.check_for_schema_changes", "macro_sql": "{% macro check_for_schema_changes(source_relation, target_relation) %}\n\n {% set schema_changed = False %}\n\n {%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}\n {%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}\n {%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}\n {%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}\n\n {% set new_target_types = diff_column_data_types(source_columns, target_columns) %}\n\n {% if source_not_in_target != [] %}\n {% set schema_changed = True %}\n {% elif target_not_in_source != [] or new_target_types != [] %}\n {% set schema_changed = True %}\n {% elif new_target_types != [] %}\n {% set schema_changed = True %}\n {% endif %}\n\n {% set changes_dict = {\n 'schema_changed': schema_changed,\n 'source_not_in_target': source_not_in_target,\n 'target_not_in_source': target_not_in_source,\n 'source_columns': source_columns,\n 'target_columns': target_columns,\n 'new_target_types': new_target_types\n } %}\n\n {% set msg %}\n In {{ target_relation }}:\n Schema changed: {{ schema_changed }}\n Source columns not in target: {{ source_not_in_target }}\n Target columns not in source: {{ target_not_in_source }}\n New column types: {{ new_target_types }}\n {% endset %}\n\n {% do log(msg) %}\n\n {{ return(changes_dict) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.diff_columns", "macro.dbt.diff_column_data_types"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9980521, "supported_languages": null}, "macro.dbt.sync_column_schemas": {"name": "sync_column_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.sync_column_schemas", "macro_sql": "{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}\n\n {%- if on_schema_change == 'append_new_columns'-%}\n {%- if add_to_target_arr | length > 0 -%}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}\n {%- endif -%}\n\n {% elif on_schema_change == 'sync_all_columns' %}\n {%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}\n {%- set new_target_types = schema_changes_dict['new_target_types'] -%}\n\n {% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}\n {% endif %}\n\n {% if new_target_types != [] %}\n {% 
for ntt in new_target_types %}\n {% set column_name = ntt['column_name'] %}\n {% set new_type = ntt['new_type'] %}\n {% do alter_column_type(target_relation, column_name, new_type) %}\n {% endfor %}\n {% endif %}\n\n {% endif %}\n\n {% set schema_change_message %}\n In {{ target_relation }}:\n Schema change approach: {{ on_schema_change }}\n Columns added: {{ add_to_target_arr }}\n Columns removed: {{ remove_from_target_arr }}\n Data types changed: {{ new_target_types }}\n {% endset %}\n\n {% do log(schema_change_message) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.alter_relation_add_remove_columns", "macro.dbt.alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.00105, "supported_languages": null}, "macro.dbt.process_schema_changes": {"name": "process_schema_changes", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.process_schema_changes", "macro_sql": "{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}\n\n {% if on_schema_change == 'ignore' %}\n\n {{ return({}) }}\n\n {% else %}\n\n {% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}\n\n {% if schema_changes_dict['schema_changed'] %}\n\n {% if on_schema_change == 'fail' %}\n\n {% set fail_msg %}\n The source and target schemas on this incremental model are out of sync!\n They can be reconciled in several ways:\n - set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.\n - Re-run the incremental model with `full_refresh: True` to update the target schema.\n - update the schema manually and re-run the process.\n\n Additional troubleshooting context:\n Source columns not in target: {{ schema_changes_dict['source_not_in_target'] }}\n Target columns not in source: {{ schema_changes_dict['target_not_in_source'] }}\n New column types: {{ schema_changes_dict['new_target_types'] }}\n {% endset %}\n\n {% do exceptions.raise_compiler_error(fail_msg) %}\n\n {# -- unless we ignore, run the sync operation per the config #}\n {% else %}\n\n {% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {% endif %}\n\n {% endif %}\n\n {{ return(schema_changes_dict['source_columns']) }}\n\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.check_for_schema_changes", "macro.dbt.sync_column_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.003159, "supported_languages": null}, "macro.dbt.materialization_table_default": {"name": "materialization_table_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/table.sql", "original_file_path": "macros/materializations/models/table/table.sql", "unique_id": "macro.dbt.materialization_table_default", "macro_sql": "{% materialization table, default %}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') %}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. 
Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n See ../view/view.sql for more information about this relation.\n */\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_table_as_sql(False, intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% do create_indexes(target_relation) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n -- finally, drop the existing/backup relation after the commit\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.statement", "macro.dbt.get_create_table_as_sql", "macro.dbt.create_indexes", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.009005, "supported_languages": ["sql"]}, "macro.dbt.get_create_table_as_sql": {"name": "get_create_table_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.get_create_table_as_sql", "macro_sql": "{% macro get_create_table_as_sql(temporary, relation, sql) -%}\n {{ adapter.dispatch('get_create_table_as_sql', 'dbt')(temporary, relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_create_table_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.010164, "supported_languages": null}, "macro.dbt.default__get_create_table_as_sql": {"name": "default__get_create_table_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", 
"original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.default__get_create_table_as_sql", "macro_sql": "{% macro default__get_create_table_as_sql(temporary, relation, sql) -%}\n {{ return(create_table_as(temporary, relation, sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.010643, "supported_languages": null}, "macro.dbt.create_table_as": {"name": "create_table_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.create_table_as", "macro_sql": "{% macro create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {# backward compatibility for create_table_as that does not support language #}\n {% if language == \"sql\" %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code)}}\n {% else %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code, language) }}\n {% endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.01173, "supported_languages": null}, "macro.dbt.default__create_table_as": {"name": "default__create_table_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.default__create_table_as", "macro_sql": "{% macro default__create_table_as(temporary, relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary: -%}temporary{%- endif %} table\n {{ relation.include(database=(not temporary), schema=(not temporary)) }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.012759, "supported_languages": null}, "macro.dbt.materialization_view_default": {"name": "materialization_view_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/view.sql", "original_file_path": "macros/materializations/models/view/view.sql", "unique_id": "macro.dbt.materialization_view_default", "macro_sql": "{%- materialization view, default -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='view') -%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n This relation (probably) doesn't exist yet. If it does exist, it's a leftover from\n a previous run, and we're going to try to drop it immediately. 
At the end of this\n materialization, we're going to rename the \"existing_relation\" to this identifier,\n and then we're going to drop it. In order to make sure we run the correct one of:\n - drop view ...\n - drop table ...\n\n We need to set the type of this relation to be the type of the existing_relation, if it exists,\n or else \"view\" as a sane default if it does not. Note that if the existing_relation does not\n exist, then there is nothing to move out of the way and subsequently drop. In that case,\n this relation will be effectively unused.\n */\n {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparison later on\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n -- move the existing view out of the way\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization -%}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.run_hooks", "macro.dbt.drop_relation_if_exists", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0184052, "supported_languages": ["sql"]}, "macro.dbt.handle_existing_table": {"name": "handle_existing_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "unique_id": "macro.dbt.handle_existing_table", "macro_sql": "{% macro handle_existing_table(full_refresh, old_relation) %}\n {{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__handle_existing_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.019115, "supported_languages": null}, "macro.dbt.default__handle_existing_table": {"name": "default__handle_existing_table",
"resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "unique_id": "macro.dbt.default__handle_existing_table", "macro_sql": "{% macro default__handle_existing_table(full_refresh, old_relation) %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ adapter.drop_relation(old_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.019679, "supported_languages": null}, "macro.dbt.create_or_replace_view": {"name": "create_or_replace_view", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_or_replace_view.sql", "original_file_path": "macros/materializations/models/view/create_or_replace_view.sql", "unique_id": "macro.dbt.create_or_replace_view", "macro_sql": "{% macro create_or_replace_view() %}\n {%- set identifier = model['alias'] -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database,\n type='view') -%}\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks) }}\n\n -- If there's a table with the same name and we weren't told to full refresh,\n -- that's an error. If we were told to full refresh, drop it. This behavior differs\n -- for Snowflake and BigQuery, so multiple dispatch is used.\n {%- if old_relation is not none and old_relation.is_table -%}\n {{ handle_existing_table(should_full_refresh(), old_relation) }}\n {%- endif -%}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(target_relation, sql) }}\n {%- endcall %}\n\n {% set should_revoke = should_revoke(exists_as_view, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=True) %}\n\n {{ run_hooks(post_hooks) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_hooks", "macro.dbt.handle_existing_table", "macro.dbt.should_full_refresh", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0234811, "supported_languages": null}, "macro.dbt.get_create_view_as_sql": {"name": "get_create_view_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.get_create_view_as_sql", "macro_sql": "{% macro get_create_view_as_sql(relation, sql) -%}\n {{ adapter.dispatch('get_create_view_as_sql', 'dbt')(relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_create_view_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0243561, "supported_languages": null}, "macro.dbt.default__get_create_view_as_sql": {"name": "default__get_create_view_as_sql", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.default__get_create_view_as_sql", "macro_sql": "{% macro default__get_create_view_as_sql(relation, sql) -%}\n {{ return(create_view_as(relation, sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0248141, "supported_languages": null}, "macro.dbt.create_view_as": {"name": "create_view_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.create_view_as", "macro_sql": "{% macro create_view_as(relation, sql) -%}\n {{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.025273, "supported_languages": null}, "macro.dbt.default__create_view_as": {"name": "default__create_view_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.default__create_view_as", "macro_sql": "{% macro default__create_view_as(relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n create view {{ relation }} as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.025913, "supported_languages": null}, "macro.dbt.materialization_seed_default": {"name": "materialization_seed_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/seed.sql", "original_file_path": "macros/materializations/seeds/seed.sql", "unique_id": "macro.dbt.materialization_seed_default", "macro_sql": "{% materialization seed, default %}\n\n {%- set identifier = model['alias'] -%}\n {%- set full_refresh_mode = (should_full_refresh()) -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n\n {%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set grant_config = config.get('grants') -%}\n {%- set agate_table = load_agate_table() -%}\n -- grab current tables grants config for comparision later on\n\n {%- do store_result('agate_table', response='OK', agate_table=agate_table) -%}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% set create_table_sql = \"\" %}\n {% if exists_as_view %}\n {{ exceptions.raise_compiler_error(\"Cannot seed to '{}', it is a view\".format(old_relation)) }}\n {% elif exists_as_table %}\n {% set create_table_sql = reset_csv_table(model, full_refresh_mode, old_relation, agate_table) %}\n {% else %}\n {% set create_table_sql = create_csv_table(model, agate_table) %}\n {% endif %}\n\n {% set code = 'CREATE' if 
full_refresh_mode else 'INSERT' %}\n {% set rows_affected = (agate_table.rows | length) %}\n {% set sql = load_csv_rows(model, agate_table) %}\n\n {% call noop_statement('main', code ~ ' ' ~ rows_affected, code, rows_affected) %}\n {{ get_csv_sql(create_table_sql, sql) }};\n {% endcall %}\n\n {% set target_relation = this.incorporate(type='table') %}\n\n {% set should_revoke = should_revoke(old_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if full_refresh_mode or not exists_as_table %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.should_full_refresh", "macro.dbt.run_hooks", "macro.dbt.reset_csv_table", "macro.dbt.create_csv_table", "macro.dbt.load_csv_rows", "macro.dbt.noop_statement", "macro.dbt.get_csv_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.033872, "supported_languages": ["sql"]}, "macro.dbt.create_csv_table": {"name": "create_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.create_csv_table", "macro_sql": "{% macro create_csv_table(model, agate_table) -%}\n {{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0429308, "supported_languages": null}, "macro.dbt.default__create_csv_table": {"name": "default__create_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__create_csv_table", "macro_sql": "{% macro default__create_csv_table(model, agate_table) %}\n {%- set column_override = model['config'].get('column_types', {}) -%}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n\n {% set sql %}\n create table {{ this.render() }} (\n {%- for col_name in agate_table.column_names -%}\n {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}\n {%- set type = column_override.get(col_name, inferred_type) -%}\n {%- set column_name = (col_name | string) -%}\n {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}\n {%- endfor -%}\n )\n {% endset %}\n\n {% call statement('_') -%}\n {{ sql }}\n {%- endcall %}\n\n {{ return(sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0452118, "supported_languages": null}, "macro.dbt.reset_csv_table": {"name": "reset_csv_table", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.reset_csv_table", "macro_sql": "{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}\n {{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__reset_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0458, "supported_languages": null}, "macro.dbt.default__reset_csv_table": {"name": "default__reset_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__reset_csv_table", "macro_sql": "{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}\n {% set sql = \"\" %}\n {% if full_refresh %}\n {{ adapter.drop_relation(old_relation) }}\n {% set sql = create_csv_table(model, agate_table) %}\n {% else %}\n {{ adapter.truncate_relation(old_relation) }}\n {% set sql = \"truncate table \" ~ old_relation %}\n {% endif %}\n\n {{ return(sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047039, "supported_languages": null}, "macro.dbt.get_csv_sql": {"name": "get_csv_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_csv_sql", "macro_sql": "{% macro get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ adapter.dispatch('get_csv_sql', 'dbt')(create_or_truncate_sql, insert_sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_csv_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047529, "supported_languages": null}, "macro.dbt.default__get_csv_sql": {"name": "default__get_csv_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_csv_sql", "macro_sql": "{% macro default__get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ create_or_truncate_sql }};\n -- dbt seed --\n {{ insert_sql }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047877, "supported_languages": null}, "macro.dbt.get_binding_char": {"name": "get_binding_char", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_binding_char", "macro_sql": "{% macro get_binding_char() -%}\n {{ adapter.dispatch('get_binding_char', 'dbt')() }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0482402, "supported_languages": null}, "macro.dbt.default__get_binding_char": {"name": 
"default__get_binding_char", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_binding_char", "macro_sql": "{% macro default__get_binding_char() %}\n {{ return('%s') }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.048544, "supported_languages": null}, "macro.dbt.get_batch_size": {"name": "get_batch_size", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_batch_size", "macro_sql": "{% macro get_batch_size() -%}\n {{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_batch_size"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.048952, "supported_languages": null}, "macro.dbt.default__get_batch_size": {"name": "default__get_batch_size", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_batch_size", "macro_sql": "{% macro default__get_batch_size() %}\n {{ return(10000) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0492558, "supported_languages": null}, "macro.dbt.get_seed_column_quoted_csv": {"name": "get_seed_column_quoted_csv", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_seed_column_quoted_csv", "macro_sql": "{% macro get_seed_column_quoted_csv(model, column_names) %}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote_seed_column(col, quote_seed_column)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0504608, "supported_languages": null}, "macro.dbt.load_csv_rows": {"name": "load_csv_rows", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.load_csv_rows", "macro_sql": "{% macro load_csv_rows(model, agate_table) -%}\n {{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__load_csv_rows"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.050931, "supported_languages": null}, "macro.dbt.default__load_csv_rows": {"name": "default__load_csv_rows", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": 
"macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__load_csv_rows", "macro_sql": "{% macro default__load_csv_rows(model, agate_table) %}\n\n {% set batch_size = get_batch_size() %}\n\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n {{ get_binding_char() }}\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_batch_size", "macro.dbt.get_seed_column_quoted_csv", "macro.dbt.get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0541089, "supported_languages": null}, "macro.dbt.generate_alias_name": {"name": "generate_alias_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "unique_id": "macro.dbt.generate_alias_name", "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_alias_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0551748, "supported_languages": null}, "macro.dbt.default__generate_alias_name": {"name": "default__generate_alias_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "unique_id": "macro.dbt.default__generate_alias_name", "macro_sql": "{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {%- if custom_alias_name is none -%}\n\n {{ node.name }}\n\n {%- else -%}\n\n {{ custom_alias_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.055804, "supported_languages": null}, "macro.dbt.generate_schema_name": {"name": "generate_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.generate_schema_name", "macro_sql": "{% macro generate_schema_name(custom_schema_name=none, node=none) -%}\n {{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1670853278.0569532, "supported_languages": null}, "macro.dbt.default__generate_schema_name": {"name": "default__generate_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.default__generate_schema_name", "macro_sql": "{% macro default__generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if custom_schema_name is none -%}\n\n {{ default_schema }}\n\n {%- else -%}\n\n {{ default_schema }}_{{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.05763, "supported_languages": null}, "macro.dbt.generate_schema_name_for_env": {"name": "generate_schema_name_for_env", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.generate_schema_name_for_env", "macro_sql": "{% macro generate_schema_name_for_env(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if target.name == 'prod' and custom_schema_name is not none -%}\n\n {{ custom_schema_name | trim }}\n\n {%- else -%}\n\n {{ default_schema }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0583591, "supported_languages": null}, "macro.dbt.generate_database_name": {"name": "generate_database_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "unique_id": "macro.dbt.generate_database_name", "macro_sql": "{% macro generate_database_name(custom_database_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_database_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.059273, "supported_languages": null}, "macro.dbt.default__generate_database_name": {"name": "default__generate_database_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "unique_id": "macro.dbt.default__generate_database_name", "macro_sql": "{% macro default__generate_database_name(custom_database_name=none, node=none) -%}\n {%- set default_database = target.database -%}\n {%- if custom_database_name is none -%}\n\n {{ default_database }}\n\n {%- else -%}\n\n {{ custom_database_name }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0599208, "supported_languages": null}, "macro.dbt.default__test_relationships": {"name": "default__test_relationships", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/relationships.sql", "original_file_path": 
"macros/generic_test_sql/relationships.sql", "unique_id": "macro.dbt.default__test_relationships", "macro_sql": "{% macro default__test_relationships(model, column_name, to, field) %}\n\nwith child as (\n select {{ column_name }} as from_field\n from {{ model }}\n where {{ column_name }} is not null\n),\n\nparent as (\n select {{ field }} as to_field\n from {{ to }}\n)\n\nselect\n from_field\n\nfrom child\nleft join parent\n on child.from_field = parent.to_field\n\nwhere parent.to_field is null\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.060704, "supported_languages": null}, "macro.dbt.default__test_not_null": {"name": "default__test_not_null", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/not_null.sql", "original_file_path": "macros/generic_test_sql/not_null.sql", "unique_id": "macro.dbt.default__test_not_null", "macro_sql": "{% macro default__test_not_null(model, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else column_name %}\n\nselect {{ column_list }}\nfrom {{ model }}\nwhere {{ column_name }} is null\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.should_store_failures"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.061424, "supported_languages": null}, "macro.dbt.default__test_unique": {"name": "default__test_unique", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/unique.sql", "original_file_path": "macros/generic_test_sql/unique.sql", "unique_id": "macro.dbt.default__test_unique", "macro_sql": "{% macro default__test_unique(model, column_name) %}\n\nselect\n {{ column_name }} as unique_field,\n count(*) as n_records\n\nfrom {{ model }}\nwhere {{ column_name }} is not null\ngroup by {{ column_name }}\nhaving count(*) > 1\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.062015, "supported_languages": null}, "macro.dbt.default__test_accepted_values": {"name": "default__test_accepted_values", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/accepted_values.sql", "original_file_path": "macros/generic_test_sql/accepted_values.sql", "unique_id": "macro.dbt.default__test_accepted_values", "macro_sql": "{% macro default__test_accepted_values(model, column_name, values, quote=True) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field,\n count(*) as n_records\n\n from {{ model }}\n group by {{ column_name }}\n\n)\n\nselect *\nfrom all_values\nwhere value_field not in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n)\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.063346, "supported_languages": null}, "macro.dbt.statement": {"name": "statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.statement", "macro_sql": "\n{%- macro statement(name=None, fetch_result=False, auto_begin=True, language='sql') -%}\n 
{%- if execute: -%}\n {%- set compiled_code = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime {} for node \"{}\"'.format(language, model['unique_id'])) }}\n {{ write(compiled_code) }}\n {%- endif -%}\n {%- if language == 'sql'-%}\n {%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}\n {%- elif language == 'python' -%}\n {%- set res = submit_python_job(model, compiled_code) -%}\n {#-- TODO: What should table be for python models? --#}\n {%- set table = None -%}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"statement macro didn't get supported language\") %}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_result(name, response=res, agate_table=table) }}\n {%- endif -%}\n\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.066719, "supported_languages": null}, "macro.dbt.noop_statement": {"name": "noop_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.noop_statement", "macro_sql": "{% macro noop_statement(name=None, message=None, code=None, rows_affected=None, res=None) -%}\n {%- set sql = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime SQL for node \"{}\"'.format(model['unique_id'])) }}\n {{ write(sql) }}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_raw_result(name, message=message, code=code, rows_affected=rows_affected, agate_table=res) }}\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.068168, "supported_languages": null}, "macro.dbt.run_query": {"name": "run_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.run_query", "macro_sql": "{% macro run_query(sql) %}\n {% call statement(\"run_query_statement\", fetch_result=true, auto_begin=false) %}\n {{ sql }}\n {% endcall %}\n\n {% do return(load_result(\"run_query_statement\").table) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.068917, "supported_languages": null}, "macro.dbt.convert_datetime": {"name": "convert_datetime", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.convert_datetime", "macro_sql": "{% macro convert_datetime(date_str, date_fmt) %}\n\n {% set error_msg -%}\n The provided partition date '{{ date_str }}' does not match the expected format '{{ date_fmt }}'\n {%- endset %}\n\n {% set res = try_or_compiler_error(error_msg, modules.datetime.datetime.strptime, date_str.strip(), date_fmt) %}\n {{ return(res) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.072692, "supported_languages": null}, "macro.dbt.dates_in_range": {"name": "dates_in_range", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": 
"macros/etc/datetime.sql", "unique_id": "macro.dbt.dates_in_range", "macro_sql": "{% macro dates_in_range(start_date_str, end_date_str=none, in_fmt=\"%Y%m%d\", out_fmt=\"%Y%m%d\") %}\n {% set end_date_str = start_date_str if end_date_str is none else end_date_str %}\n\n {% set start_date = convert_datetime(start_date_str, in_fmt) %}\n {% set end_date = convert_datetime(end_date_str, in_fmt) %}\n\n {% set day_count = (end_date - start_date).days %}\n {% if day_count < 0 %}\n {% set msg -%}\n Partiton start date is after the end date ({{ start_date }}, {{ end_date }})\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg, model) }}\n {% endif %}\n\n {% set date_list = [] %}\n {% for i in range(0, day_count + 1) %}\n {% set the_date = (modules.datetime.timedelta(days=i) + start_date) %}\n {% if not out_fmt %}\n {% set _ = date_list.append(the_date) %}\n {% else %}\n {% set _ = date_list.append(the_date.strftime(out_fmt)) %}\n {% endif %}\n {% endfor %}\n\n {{ return(date_list) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.convert_datetime"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0760698, "supported_languages": null}, "macro.dbt.partition_range": {"name": "partition_range", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.partition_range", "macro_sql": "{% macro partition_range(raw_partition_date, date_fmt='%Y%m%d') %}\n {% set partition_range = (raw_partition_date | string).split(\",\") %}\n\n {% if (partition_range | length) == 1 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = none %}\n {% elif (partition_range | length) == 2 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = partition_range[1] %}\n {% else %}\n {{ exceptions.raise_compiler_error(\"Invalid partition time. Expected format: {Start Date}[,{End Date}]. 
Got: \" ~ raw_partition_date) }}\n {% endif %}\n\n {{ return(dates_in_range(start_date, end_date, in_fmt=date_fmt)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.dates_in_range"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.077999, "supported_languages": null}, "macro.dbt.py_current_timestring": {"name": "py_current_timestring", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.py_current_timestring", "macro_sql": "{% macro py_current_timestring() %}\n {% set dt = modules.datetime.datetime.now() %}\n {% do return(dt.strftime(\"%Y%m%d%H%M%S%f\")) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.078628, "supported_languages": null}, "macro.dbt.except": {"name": "except", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "unique_id": "macro.dbt.except", "macro_sql": "{% macro except() %}\n {{ return(adapter.dispatch('except', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__except"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.079215, "supported_languages": null}, "macro.dbt.default__except": {"name": "default__except", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "unique_id": "macro.dbt.default__except", "macro_sql": "{% macro default__except() %}\n\n except\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.079419, "supported_languages": null}, "macro.dbt.replace": {"name": "replace", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "unique_id": "macro.dbt.replace", "macro_sql": "{% macro replace(field, old_chars, new_chars) -%}\n {{ return(adapter.dispatch('replace', 'dbt') (field, old_chars, new_chars)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__replace"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.080247, "supported_languages": null}, "macro.dbt.default__replace": {"name": "default__replace", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "unique_id": "macro.dbt.default__replace", "macro_sql": "{% macro default__replace(field, old_chars, new_chars) %}\n\n replace(\n {{ field }},\n {{ old_chars }},\n {{ new_chars }}\n )\n\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.080682, "supported_languages": null}, "macro.dbt.concat": {"name": "concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "unique_id": "macro.dbt.concat", "macro_sql": "{% macro concat(fields) -%}\n {{ return(adapter.dispatch('concat', 
'dbt')(fields)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.081283, "supported_languages": null}, "macro.dbt.default__concat": {"name": "default__concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "unique_id": "macro.dbt.default__concat", "macro_sql": "{% macro default__concat(fields) -%}\n {{ fields|join(' || ') }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0816069, "supported_languages": null}, "macro.dbt.length": {"name": "length", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "unique_id": "macro.dbt.length", "macro_sql": "{% macro length(expression) -%}\n {{ return(adapter.dispatch('length', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__length"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.082229, "supported_languages": null}, "macro.dbt.default__length": {"name": "default__length", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "unique_id": "macro.dbt.default__length", "macro_sql": "{% macro default__length(expression) %}\n\n length(\n {{ expression }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.082507, "supported_languages": null}, "macro.dbt.dateadd": {"name": "dateadd", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt.dateadd", "macro_sql": "{% macro dateadd(datepart, interval, from_date_or_timestamp) %}\n {{ return(adapter.dispatch('dateadd', 'dbt')(datepart, interval, from_date_or_timestamp)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__dateadd"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.083297, "supported_languages": null}, "macro.dbt.default__dateadd": {"name": "default__dateadd", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt.default__dateadd", "macro_sql": "{% macro default__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n dateadd(\n {{ datepart }},\n {{ interval }},\n {{ from_date_or_timestamp }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0837162, "supported_languages": null}, "macro.dbt.intersect": {"name": "intersect", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "unique_id": "macro.dbt.intersect", "macro_sql": "{% macro intersect() %}\n {{ return(adapter.dispatch('intersect', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": 
["macro.dbt.default__intersect"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.084266, "supported_languages": null}, "macro.dbt.default__intersect": {"name": "default__intersect", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "unique_id": "macro.dbt.default__intersect", "macro_sql": "{% macro default__intersect() %}\n\n intersect\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0844731, "supported_languages": null}, "macro.dbt.escape_single_quotes": {"name": "escape_single_quotes", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "unique_id": "macro.dbt.escape_single_quotes", "macro_sql": "{% macro escape_single_quotes(expression) %}\n {{ return(adapter.dispatch('escape_single_quotes', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__escape_single_quotes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.08511, "supported_languages": null}, "macro.dbt.default__escape_single_quotes": {"name": "default__escape_single_quotes", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "unique_id": "macro.dbt.default__escape_single_quotes", "macro_sql": "{% macro default__escape_single_quotes(expression) -%}\n{{ expression | replace(\"'\",\"''\") }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.085469, "supported_languages": null}, "macro.dbt.right": {"name": "right", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "unique_id": "macro.dbt.right", "macro_sql": "{% macro right(string_text, length_expression) -%}\n {{ return(adapter.dispatch('right', 'dbt') (string_text, length_expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__right"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.086175, "supported_languages": null}, "macro.dbt.default__right": {"name": "default__right", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "unique_id": "macro.dbt.default__right", "macro_sql": "{% macro default__right(string_text, length_expression) %}\n\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0865312, "supported_languages": null}, "macro.dbt.listagg": {"name": "listagg", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt.listagg", "macro_sql": "{% macro listagg(measure, delimiter_text=\"','\", order_by_clause=none, limit_num=none) 
-%}\n {{ return(adapter.dispatch('listagg', 'dbt') (measure, delimiter_text, order_by_clause, limit_num)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__listagg"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.087887, "supported_languages": null}, "macro.dbt.default__listagg": {"name": "default__listagg", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt.default__listagg", "macro_sql": "{% macro default__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n array_slice(\n array_agg(\n {{ measure }}\n ){% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n ,0\n ,{{ limit_num }}\n ),\n {{ delimiter_text }}\n )\n {%- else %}\n listagg(\n {{ measure }},\n {{ delimiter_text }}\n )\n {% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.089178, "supported_languages": null}, "macro.dbt.datediff": {"name": "datediff", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt.datediff", "macro_sql": "{% macro datediff(first_date, second_date, datepart) %}\n {{ return(adapter.dispatch('datediff', 'dbt')(first_date, second_date, datepart)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.090015, "supported_languages": null}, "macro.dbt.default__datediff": {"name": "default__datediff", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt.default__datediff", "macro_sql": "{% macro default__datediff(first_date, second_date, datepart) -%}\n\n datediff(\n {{ datepart }},\n {{ first_date }},\n {{ second_date }}\n )\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0904791, "supported_languages": null}, "macro.dbt.safe_cast": {"name": "safe_cast", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "unique_id": "macro.dbt.safe_cast", "macro_sql": "{% macro safe_cast(field, type) %}\n {{ return(adapter.dispatch('safe_cast', 'dbt') (field, type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__safe_cast"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.091284, "supported_languages": null}, "macro.dbt.default__safe_cast": {"name": "default__safe_cast", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "unique_id": "macro.dbt.default__safe_cast", "macro_sql": "{% macro default__safe_cast(field, type) %}\n {# most databases don't support this function yet\n so we just need to use 
cast #}\n cast({{field}} as {{type}})\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0916579, "supported_languages": null}, "macro.dbt.hash": {"name": "hash", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "unique_id": "macro.dbt.hash", "macro_sql": "{% macro hash(field) -%}\n {{ return(adapter.dispatch('hash', 'dbt') (field)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__hash"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0922902, "supported_languages": null}, "macro.dbt.default__hash": {"name": "default__hash", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "unique_id": "macro.dbt.default__hash", "macro_sql": "{% macro default__hash(field) -%}\n md5(cast({{ field }} as {{ api.Column.translate_type('string') }}))\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0927832, "supported_languages": null}, "macro.dbt.cast_bool_to_text": {"name": "cast_bool_to_text", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "unique_id": "macro.dbt.cast_bool_to_text", "macro_sql": "{% macro cast_bool_to_text(field) %}\n {{ adapter.dispatch('cast_bool_to_text', 'dbt') (field) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__cast_bool_to_text"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.093413, "supported_languages": null}, "macro.dbt.default__cast_bool_to_text": {"name": "default__cast_bool_to_text", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "unique_id": "macro.dbt.default__cast_bool_to_text", "macro_sql": "{% macro default__cast_bool_to_text(field) %}\n cast({{ field }} as {{ api.Column.translate_type('string') }})\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0938308, "supported_languages": null}, "macro.dbt.any_value": {"name": "any_value", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt.any_value", "macro_sql": "{% macro any_value(expression) -%}\n {{ return(adapter.dispatch('any_value', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__any_value"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.094443, "supported_languages": null}, "macro.dbt.default__any_value": {"name": "default__any_value", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt.default__any_value", "macro_sql": "{% macro default__any_value(expression) -%}\n\n 
any_value({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.094721, "supported_languages": null}, "macro.dbt.position": {"name": "position", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "unique_id": "macro.dbt.position", "macro_sql": "{% macro position(substring_text, string_text) -%}\n {{ return(adapter.dispatch('position', 'dbt') (substring_text, string_text)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__position"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.095426, "supported_languages": null}, "macro.dbt.default__position": {"name": "default__position", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "unique_id": "macro.dbt.default__position", "macro_sql": "{% macro default__position(substring_text, string_text) %}\n\n position(\n {{ substring_text }} in {{ string_text }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.095783, "supported_languages": null}, "macro.dbt.string_literal": {"name": "string_literal", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "unique_id": "macro.dbt.string_literal", "macro_sql": "{%- macro string_literal(value) -%}\n {{ return(adapter.dispatch('string_literal', 'dbt') (value)) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__string_literal"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.096468, "supported_languages": null}, "macro.dbt.default__string_literal": {"name": "default__string_literal", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "unique_id": "macro.dbt.default__string_literal", "macro_sql": "{% macro default__string_literal(value) -%}\n '{{ value }}'\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.096813, "supported_languages": null}, "macro.dbt.type_string": {"name": "type_string", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_string", "macro_sql": "\n\n{%- macro type_string() -%}\n {{ return(adapter.dispatch('type_string', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_string"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.098714, "supported_languages": null}, "macro.dbt.default__type_string": {"name": "default__type_string", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_string", "macro_sql": "{% macro default__type_string() %}\n {{ 
return(api.Column.translate_type(\"string\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.099203, "supported_languages": null}, "macro.dbt.type_timestamp": {"name": "type_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_timestamp", "macro_sql": "\n\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0996249, "supported_languages": null}, "macro.dbt.default__type_timestamp": {"name": "default__type_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_timestamp", "macro_sql": "{% macro default__type_timestamp() %}\n {{ return(api.Column.translate_type(\"timestamp\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1000152, "supported_languages": null}, "macro.dbt.type_float": {"name": "type_float", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_float", "macro_sql": "\n\n{%- macro type_float() -%}\n {{ return(adapter.dispatch('type_float', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_float"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.100458, "supported_languages": null}, "macro.dbt.default__type_float": {"name": "default__type_float", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_float", "macro_sql": "{% macro default__type_float() %}\n {{ return(api.Column.translate_type(\"float\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.10098, "supported_languages": null}, "macro.dbt.type_numeric": {"name": "type_numeric", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_numeric", "macro_sql": "\n\n{%- macro type_numeric() -%}\n {{ return(adapter.dispatch('type_numeric', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_numeric"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.101395, "supported_languages": null}, "macro.dbt.default__type_numeric": {"name": "default__type_numeric", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_numeric", "macro_sql": "{% macro default__type_numeric() %}\n {{ 
return(api.Column.numeric_type(\"numeric\", 28, 6)) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.101847, "supported_languages": null}, "macro.dbt.type_bigint": {"name": "type_bigint", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_bigint", "macro_sql": "\n\n{%- macro type_bigint() -%}\n {{ return(adapter.dispatch('type_bigint', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_bigint"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1022651, "supported_languages": null}, "macro.dbt.default__type_bigint": {"name": "default__type_bigint", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_bigint", "macro_sql": "{% macro default__type_bigint() %}\n {{ return(api.Column.translate_type(\"bigint\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.102656, "supported_languages": null}, "macro.dbt.type_int": {"name": "type_int", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_int", "macro_sql": "\n\n{%- macro type_int() -%}\n {{ return(adapter.dispatch('type_int', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_int"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103071, "supported_languages": null}, "macro.dbt.default__type_int": {"name": "default__type_int", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_int", "macro_sql": "{%- macro default__type_int() -%}\n {{ return(api.Column.translate_type(\"integer\")) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103501, "supported_languages": null}, "macro.dbt.type_boolean": {"name": "type_boolean", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_boolean", "macro_sql": "\n\n{%- macro type_boolean() -%}\n {{ return(adapter.dispatch('type_boolean', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_boolean"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103938, "supported_languages": null}, "macro.dbt.default__type_boolean": {"name": "default__type_boolean", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_boolean", "macro_sql": "{%- macro default__type_boolean() -%}\n {{ 
return(api.Column.translate_type(\"boolean\")) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.104328, "supported_languages": null}, "macro.dbt.array_concat": {"name": "array_concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "unique_id": "macro.dbt.array_concat", "macro_sql": "{% macro array_concat(array_1, array_2) -%}\n {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.105006, "supported_languages": null}, "macro.dbt.default__array_concat": {"name": "default__array_concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "unique_id": "macro.dbt.default__array_concat", "macro_sql": "{% macro default__array_concat(array_1, array_2) -%}\n array_cat({{ array_1 }}, {{ array_2 }})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.105355, "supported_languages": null}, "macro.dbt.bool_or": {"name": "bool_or", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "unique_id": "macro.dbt.bool_or", "macro_sql": "{% macro bool_or(expression) -%}\n {{ return(adapter.dispatch('bool_or', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__bool_or"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1059608, "supported_languages": null}, "macro.dbt.default__bool_or": {"name": "default__bool_or", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "unique_id": "macro.dbt.default__bool_or", "macro_sql": "{% macro default__bool_or(expression) -%}\n\n bool_or({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1062348, "supported_languages": null}, "macro.dbt.last_day": {"name": "last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.last_day", "macro_sql": "{% macro last_day(date, datepart) %}\n {{ return(adapter.dispatch('last_day', 'dbt') (date, datepart)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.107027, "supported_languages": null}, "macro.dbt.default_last_day": {"name": "default_last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.default_last_day", "macro_sql": "\n\n{%- macro default_last_day(date, datepart) -%}\n cast(\n {{dbt.dateadd('day', 
'-1',\n dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))\n )}}\n as date)\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1076999, "supported_languages": null}, "macro.dbt.default__last_day": {"name": "default__last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.default__last_day", "macro_sql": "{% macro default__last_day(date, datepart) -%}\n {{dbt.default_last_day(date, datepart)}}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.108077, "supported_languages": null}, "macro.dbt.split_part": {"name": "split_part", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt.split_part", "macro_sql": "{% macro split_part(string_text, delimiter_text, part_number) %}\n {{ return(adapter.dispatch('split_part', 'dbt') (string_text, delimiter_text, part_number)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__split_part"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1092212, "supported_languages": null}, "macro.dbt.default__split_part": {"name": "default__split_part", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt.default__split_part", "macro_sql": "{% macro default__split_part(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n {{ part_number }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.109689, "supported_languages": null}, "macro.dbt._split_part_negative": {"name": "_split_part_negative", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt._split_part_negative", "macro_sql": "{% macro _split_part_negative(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n length({{ string_text }})\n - length(\n replace({{ string_text }}, {{ delimiter_text }}, '')\n ) + 2 {{ part_number }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.11041, "supported_languages": null}, "macro.dbt.date_trunc": {"name": "date_trunc", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "unique_id": "macro.dbt.date_trunc", "macro_sql": "{% macro date_trunc(datepart, date) -%}\n {{ return(adapter.dispatch('date_trunc', 'dbt') (datepart, date)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853278.111156, "supported_languages": null}, "macro.dbt.default__date_trunc": {"name": "default__date_trunc", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "unique_id": "macro.dbt.default__date_trunc", "macro_sql": "{% macro default__date_trunc(datepart, date) -%}\n date_trunc('{{datepart}}', {{date}})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1115131, "supported_languages": null}, "macro.dbt.array_construct": {"name": "array_construct", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "unique_id": "macro.dbt.array_construct", "macro_sql": "{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}\n {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_construct"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1124191, "supported_languages": null}, "macro.dbt.default__array_construct": {"name": "default__array_construct", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "unique_id": "macro.dbt.default__array_construct", "macro_sql": "{% macro default__array_construct(inputs, data_type) -%}\n {% if inputs|length > 0 %}\n array[ {{ inputs|join(' , ') }} ]\n {% else %}\n array[]::{{data_type}}[]\n {% endif %}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.113195, "supported_languages": null}, "macro.dbt.array_append": {"name": "array_append", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "unique_id": "macro.dbt.array_append", "macro_sql": "{% macro array_append(array, new_element) -%}\n {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_append"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.113871, "supported_languages": null}, "macro.dbt.default__array_append": {"name": "default__array_append", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "unique_id": "macro.dbt.default__array_append", "macro_sql": "{% macro default__array_append(array, new_element) -%}\n array_append({{ array }}, {{ new_element }})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.114215, "supported_languages": null}, "macro.dbt.create_schema": {"name": "create_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.create_schema", "macro_sql": "{% macro create_schema(relation) 
-%}\n {{ adapter.dispatch('create_schema', 'dbt')(relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115037, "supported_languages": null}, "macro.dbt.default__create_schema": {"name": "default__create_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.default__create_schema", "macro_sql": "{% macro default__create_schema(relation) -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier() }}\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115519, "supported_languages": null}, "macro.dbt.drop_schema": {"name": "drop_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.drop_schema", "macro_sql": "{% macro drop_schema(relation) -%}\n {{ adapter.dispatch('drop_schema', 'dbt')(relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__drop_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115935, "supported_languages": null}, "macro.dbt.default__drop_schema": {"name": "default__drop_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.default__drop_schema", "macro_sql": "{% macro default__drop_schema(relation) -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier() }} cascade\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.116411, "supported_languages": null}, "macro.dbt.current_timestamp": {"name": "current_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp", "macro_sql": "{%- macro current_timestamp() -%}\n {{ adapter.dispatch('current_timestamp', 'dbt')() }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.117439, "supported_languages": null}, "macro.dbt.default__current_timestamp": {"name": "default__current_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp", "macro_sql": "{% macro default__current_timestamp() -%}\n {{ exceptions.raise_not_implemented(\n 'current_timestamp macro not implemented for adapter ' + adapter.type()) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 
1670853278.117823, "supported_languages": null}, "macro.dbt.snapshot_get_time": {"name": "snapshot_get_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.snapshot_get_time", "macro_sql": "\n\n{%- macro snapshot_get_time() -%}\n {{ adapter.dispatch('snapshot_get_time', 'dbt')() }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118193, "supported_languages": null}, "macro.dbt.default__snapshot_get_time": {"name": "default__snapshot_get_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__snapshot_get_time", "macro_sql": "{% macro default__snapshot_get_time() %}\n {{ current_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118471, "supported_languages": null}, "macro.dbt.current_timestamp_backcompat": {"name": "current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp_backcompat", "macro_sql": "{% macro current_timestamp_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118895, "supported_languages": null}, "macro.dbt.default__current_timestamp_backcompat": {"name": "default__current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp_backcompat", "macro_sql": "{% macro default__current_timestamp_backcompat() %}\n current_timestamp::timestamp\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.119096, "supported_languages": null}, "macro.dbt.current_timestamp_in_utc_backcompat": {"name": "current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp_in_utc_backcompat", "macro_sql": "{% macro current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_in_utc_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1195142, "supported_languages": null}, "macro.dbt.default__current_timestamp_in_utc_backcompat": {"name": "default__current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro default__current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp_backcompat", "macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.119937, "supported_languages": null}, "macro.dbt.get_create_index_sql": {"name": "get_create_index_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.get_create_index_sql", "macro_sql": "{% macro get_create_index_sql(relation, index_dict) -%}\n {{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_create_index_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.120949, "supported_languages": null}, "macro.dbt.default__get_create_index_sql": {"name": "default__get_create_index_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.default__get_create_index_sql", "macro_sql": "{% macro default__get_create_index_sql(relation, index_dict) -%}\n {% do return(None) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.121309, "supported_languages": null}, "macro.dbt.create_indexes": {"name": "create_indexes", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.create_indexes", "macro_sql": "{% macro create_indexes(relation) -%}\n {{ adapter.dispatch('create_indexes', 'dbt')(relation) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.12189, "supported_languages": null}, "macro.dbt.default__create_indexes": {"name": "default__create_indexes", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.default__create_indexes", "macro_sql": "{% macro default__create_indexes(relation) -%}\n {%- set _indexes = config.get('indexes', default=[]) -%}\n\n {% for _index_dict in _indexes %}\n {% set create_index_sql = get_create_index_sql(relation, _index_dict) %}\n {% if create_index_sql %}\n {% do run_query(create_index_sql) %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_create_index_sql", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1229818, "supported_languages": null}, "macro.dbt.make_intermediate_relation": {"name": "make_intermediate_relation", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_intermediate_relation", "macro_sql": "{% macro make_intermediate_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_intermediate_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_intermediate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1292732, "supported_languages": null}, "macro.dbt.default__make_intermediate_relation": {"name": "default__make_intermediate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_intermediate_relation", "macro_sql": "{% macro default__make_intermediate_relation(base_relation, suffix) %}\n {{ return(default__make_temp_relation(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.129865, "supported_languages": null}, "macro.dbt.make_temp_relation": {"name": "make_temp_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_temp_relation", "macro_sql": "{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.130422, "supported_languages": null}, "macro.dbt.default__make_temp_relation": {"name": "default__make_temp_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_temp_relation", "macro_sql": "{% macro default__make_temp_relation(base_relation, suffix) %}\n {%- set temp_identifier = base_relation.identifier ~ suffix -%}\n {%- set temp_relation = base_relation.incorporate(\n path={\"identifier\": temp_identifier}) -%}\n\n {{ return(temp_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.131156, "supported_languages": null}, "macro.dbt.make_backup_relation": {"name": "make_backup_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_backup_relation", "macro_sql": "{% macro make_backup_relation(base_relation, backup_relation_type, suffix='__dbt_backup') %}\n {{ return(adapter.dispatch('make_backup_relation', 'dbt')(base_relation, backup_relation_type, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_backup_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.131776, "supported_languages": null}, 
"macro.dbt.default__make_backup_relation": {"name": "default__make_backup_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_backup_relation", "macro_sql": "{% macro default__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {%- set backup_identifier = base_relation.identifier ~ suffix -%}\n {%- set backup_relation = base_relation.incorporate(\n path={\"identifier\": backup_identifier},\n type=backup_relation_type\n ) -%}\n {{ return(backup_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1325812, "supported_languages": null}, "macro.dbt.drop_relation": {"name": "drop_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.drop_relation", "macro_sql": "{% macro drop_relation(relation) -%}\n {{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__drop_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.133047, "supported_languages": null}, "macro.dbt.default__drop_relation": {"name": "default__drop_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__drop_relation", "macro_sql": "{% macro default__drop_relation(relation) -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n drop {{ relation.type }} if exists {{ relation }} cascade\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.133596, "supported_languages": null}, "macro.dbt.truncate_relation": {"name": "truncate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.truncate_relation", "macro_sql": "{% macro truncate_relation(relation) -%}\n {{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__truncate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.134069, "supported_languages": null}, "macro.dbt.default__truncate_relation": {"name": "default__truncate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__truncate_relation", "macro_sql": "{% macro default__truncate_relation(relation) -%}\n {% call statement('truncate_relation') -%}\n truncate table {{ relation }}\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1344929, "supported_languages": null}, "macro.dbt.rename_relation": {"name": "rename_relation", "resource_type": "macro", 
"package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.rename_relation", "macro_sql": "{% macro rename_relation(from_relation, to_relation) -%}\n {{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__rename_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.13501, "supported_languages": null}, "macro.dbt.default__rename_relation": {"name": "default__rename_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__rename_relation", "macro_sql": "{% macro default__rename_relation(from_relation, to_relation) -%}\n {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ target_name }}\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.135726, "supported_languages": null}, "macro.dbt.get_or_create_relation": {"name": "get_or_create_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.get_or_create_relation", "macro_sql": "{% macro get_or_create_relation(database, schema, identifier, type) -%}\n {{ return(adapter.dispatch('get_or_create_relation', 'dbt')(database, schema, identifier, type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_or_create_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.136352, "supported_languages": null}, "macro.dbt.default__get_or_create_relation": {"name": "default__get_or_create_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__get_or_create_relation", "macro_sql": "{% macro default__get_or_create_relation(database, schema, identifier, type) %}\n {%- set target_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n\n {% if target_relation %}\n {% do return([true, target_relation]) %}\n {% endif %}\n\n {%- set new_relation = api.Relation.create(\n database=database,\n schema=schema,\n identifier=identifier,\n type=type\n ) -%}\n {% do return([false, new_relation]) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1378748, "supported_languages": null}, "macro.dbt.load_cached_relation": {"name": "load_cached_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.load_cached_relation", "macro_sql": "{% macro load_cached_relation(relation) %}\n {% do return(adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier\n )) -%}\n{% endmacro %}", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1384661, "supported_languages": null}, "macro.dbt.load_relation": {"name": "load_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.load_relation", "macro_sql": "{% macro load_relation(relation) %}\n {{ return(load_cached_relation(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.138835, "supported_languages": null}, "macro.dbt.drop_relation_if_exists": {"name": "drop_relation_if_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.drop_relation_if_exists", "macro_sql": "{% macro drop_relation_if_exists(relation) %}\n {% if relation is not none %}\n {{ adapter.drop_relation(relation) }}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.139338, "supported_languages": null}, "macro.dbt.collect_freshness": {"name": "collect_freshness", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "unique_id": "macro.dbt.collect_freshness", "macro_sql": "{% macro collect_freshness(source, loaded_at_field, filter) %}\n {{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__collect_freshness"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.140299, "supported_languages": null}, "macro.dbt.default__collect_freshness": {"name": "default__collect_freshness", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "unique_id": "macro.dbt.default__collect_freshness", "macro_sql": "{% macro default__collect_freshness(source, loaded_at_field, filter) %}\n {% call statement('collect_freshness', fetch_result=True, auto_begin=False) -%}\n select\n max({{ loaded_at_field }}) as max_loaded_at,\n {{ current_timestamp() }} as snapshotted_at\n from {{ source }}\n {% if filter %}\n where {{ filter }}\n {% endif %}\n {% endcall %}\n {{ return(load_result('collect_freshness').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.141366, "supported_languages": null}, "macro.dbt.copy_grants": {"name": "copy_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.copy_grants", "macro_sql": "{% macro copy_grants() %}\n {{ return(adapter.dispatch('copy_grants', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": 
null}, "patch_path": null, "arguments": [], "created_at": 1670853278.144733, "supported_languages": null}, "macro.dbt.default__copy_grants": {"name": "default__copy_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__copy_grants", "macro_sql": "{% macro default__copy_grants() %}\n {{ return(True) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.14506, "supported_languages": null}, "macro.dbt.support_multiple_grantees_per_dcl_statement": {"name": "support_multiple_grantees_per_dcl_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.support_multiple_grantees_per_dcl_statement", "macro_sql": "{% macro support_multiple_grantees_per_dcl_statement() %}\n {{ return(adapter.dispatch('support_multiple_grantees_per_dcl_statement', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.145495, "supported_languages": null}, "macro.dbt.default__support_multiple_grantees_per_dcl_statement": {"name": "default__support_multiple_grantees_per_dcl_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__support_multiple_grantees_per_dcl_statement", "macro_sql": "\n\n{%- macro default__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(True) }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1457942, "supported_languages": null}, "macro.dbt.should_revoke": {"name": "should_revoke", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.should_revoke", "macro_sql": "{% macro should_revoke(existing_relation, full_refresh_mode=True) %}\n\n {% if not existing_relation %}\n {#-- The table doesn't already exist, so no grants to copy over --#}\n {{ return(False) }}\n {% elif full_refresh_mode %}\n {#-- The object is being REPLACED -- whether grants are copied over depends on the value of user config --#}\n {{ return(copy_grants()) }}\n {% else %}\n {#-- The table is being merged/upserted/inserted -- grants will be carried over --#}\n {{ return(True) }}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.146819, "supported_languages": null}, "macro.dbt.get_show_grant_sql": {"name": "get_show_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_show_grant_sql", "macro_sql": "{% macro get_show_grant_sql(relation) %}\n {{ return(adapter.dispatch(\"get_show_grant_sql\", \"dbt\")(relation)) }}\n{% 
endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_show_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.147292, "supported_languages": null}, "macro.dbt.default__get_show_grant_sql": {"name": "default__get_show_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_show_grant_sql", "macro_sql": "{% macro default__get_show_grant_sql(relation) %}\n show grants on {{ relation }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.147562, "supported_languages": null}, "macro.dbt.get_grant_sql": {"name": "get_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_grant_sql", "macro_sql": "{% macro get_grant_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_grant_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1481369, "supported_languages": null}, "macro.dbt.default__get_grant_sql": {"name": "default__get_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_grant_sql", "macro_sql": "\n\n{%- macro default__get_grant_sql(relation, privilege, grantees) -%}\n grant {{ privilege }} on {{ relation }} to {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.148619, "supported_languages": null}, "macro.dbt.get_revoke_sql": {"name": "get_revoke_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_revoke_sql", "macro_sql": "{% macro get_revoke_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_revoke_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_revoke_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1491919, "supported_languages": null}, "macro.dbt.default__get_revoke_sql": {"name": "default__get_revoke_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_revoke_sql", "macro_sql": "\n\n{%- macro default__get_revoke_sql(relation, privilege, grantees) -%}\n revoke {{ privilege }} on {{ relation }} from {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.149673, "supported_languages": null}, 
"macro.dbt.get_dcl_statement_list": {"name": "get_dcl_statement_list", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_dcl_statement_list", "macro_sql": "{% macro get_dcl_statement_list(relation, grant_config, get_dcl_macro) %}\n {{ return(adapter.dispatch('get_dcl_statement_list', 'dbt')(relation, grant_config, get_dcl_macro)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_dcl_statement_list"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1502562, "supported_languages": null}, "macro.dbt.default__get_dcl_statement_list": {"name": "default__get_dcl_statement_list", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_dcl_statement_list", "macro_sql": "\n\n{%- macro default__get_dcl_statement_list(relation, grant_config, get_dcl_macro) -%}\n {#\n -- Unpack grant_config into specific privileges and the set of users who need them granted/revoked.\n -- Depending on whether this database supports multiple grantees per statement, pass in the list of\n -- all grantees per privilege, or (if not) template one statement per privilege-grantee pair.\n -- `get_dcl_macro` will be either `get_grant_sql` or `get_revoke_sql`\n #}\n {%- set dcl_statements = [] -%}\n {%- for privilege, grantees in grant_config.items() %}\n {%- if support_multiple_grantees_per_dcl_statement() and grantees -%}\n {%- set dcl = get_dcl_macro(relation, privilege, grantees) -%}\n {%- do dcl_statements.append(dcl) -%}\n {%- else -%}\n {%- for grantee in grantees -%}\n {% set dcl = get_dcl_macro(relation, privilege, [grantee]) %}\n {%- do dcl_statements.append(dcl) -%}\n {% endfor -%}\n {%- endif -%}\n {%- endfor -%}\n {{ return(dcl_statements) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.151925, "supported_languages": null}, "macro.dbt.call_dcl_statements": {"name": "call_dcl_statements", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.call_dcl_statements", "macro_sql": "{% macro call_dcl_statements(dcl_statement_list) %}\n {{ return(adapter.dispatch(\"call_dcl_statements\", \"dbt\")(dcl_statement_list)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.152409, "supported_languages": null}, "macro.dbt.default__call_dcl_statements": {"name": "default__call_dcl_statements", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__call_dcl_statements", "macro_sql": "{% macro default__call_dcl_statements(dcl_statement_list) %}\n {#\n -- By default, supply all grant + revoke statements in a single semicolon-separated block,\n -- so that they're all processed together.\n\n -- Some databases do not support this. 
Those adapters will need to override this macro\n -- to run each statement individually.\n #}\n {% call statement('grants') %}\n {% for dcl_statement in dcl_statement_list %}\n {{ dcl_statement }};\n {% endfor %}\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.153051, "supported_languages": null}, "macro.dbt.apply_grants": {"name": "apply_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.apply_grants", "macro_sql": "{% macro apply_grants(relation, grant_config, should_revoke) %}\n {{ return(adapter.dispatch(\"apply_grants\", \"dbt\")(relation, grant_config, should_revoke)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.153632, "supported_languages": null}, "macro.dbt.default__apply_grants": {"name": "default__apply_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__apply_grants", "macro_sql": "{% macro default__apply_grants(relation, grant_config, should_revoke=True) %}\n {#-- If grant_config is {} or None, this is a no-op --#}\n {% if grant_config %}\n {% if should_revoke %}\n {#-- We think previous grants may have carried over --#}\n {#-- Show current grants and calculate diffs --#}\n {% set current_grants_table = run_query(get_show_grant_sql(relation)) %}\n {% set current_grants_dict = adapter.standardize_grants_dict(current_grants_table) %}\n {% set needs_granting = diff_of_two_dicts(grant_config, current_grants_dict) %}\n {% set needs_revoking = diff_of_two_dicts(current_grants_dict, grant_config) %}\n {% if not (needs_granting or needs_revoking) %}\n {{ log('On ' ~ relation ~': All grants are in place, no revocation or granting needed.')}}\n {% endif %}\n {% else %}\n {#-- We don't think there's any chance of previous grants having carried over. --#}\n {#-- Jump straight to granting what the user has configured. 
--#}\n {% set needs_revoking = {} %}\n {% set needs_granting = grant_config %}\n {% endif %}\n {% if needs_granting or needs_revoking %}\n {% set revoke_statement_list = get_dcl_statement_list(relation, needs_revoking, get_revoke_sql) %}\n {% set grant_statement_list = get_dcl_statement_list(relation, needs_granting, get_grant_sql) %}\n {% set dcl_statement_list = revoke_statement_list + grant_statement_list %}\n {% if dcl_statement_list %}\n {{ call_dcl_statements(dcl_statement_list) }}\n {% endif %}\n {% endif %}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.get_show_grant_sql", "macro.dbt.get_dcl_statement_list", "macro.dbt.call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.156651, "supported_languages": null}, "macro.dbt.alter_column_comment": {"name": "alter_column_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.alter_column_comment", "macro_sql": "{% macro alter_column_comment(relation, column_dict) -%}\n {{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.158199, "supported_languages": null}, "macro.dbt.default__alter_column_comment": {"name": "default__alter_column_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__alter_column_comment", "macro_sql": "{% macro default__alter_column_comment(relation, column_dict) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_column_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1586392, "supported_languages": null}, "macro.dbt.alter_relation_comment": {"name": "alter_relation_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.alter_relation_comment", "macro_sql": "{% macro alter_relation_comment(relation, relation_comment) -%}\n {{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_relation_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.159168, "supported_languages": null}, "macro.dbt.default__alter_relation_comment": {"name": "default__alter_relation_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__alter_relation_comment", "macro_sql": "{% macro default__alter_relation_comment(relation, relation_comment) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_relation_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.159607, "supported_languages": null}, "macro.dbt.persist_docs": {"name": "persist_docs", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.persist_docs", "macro_sql": "{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}\n {{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.160279, "supported_languages": null}, "macro.dbt.default__persist_docs": {"name": "default__persist_docs", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__persist_docs", "macro_sql": "{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}\n {% if for_relation and config.persist_relation_docs() and model.description %}\n {% do run_query(alter_relation_comment(relation, model.description)) %}\n {% endif %}\n\n {% if for_columns and config.persist_column_docs() and model.columns %}\n {% do run_query(alter_column_comment(relation, model.columns)) %}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.alter_relation_comment", "macro.dbt.alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.161473, "supported_languages": null}, "macro.dbt.get_catalog": {"name": "get_catalog", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.get_catalog", "macro_sql": "{% macro get_catalog(information_schema, schemas) -%}\n {{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_catalog"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1643069, "supported_languages": null}, "macro.dbt.default__get_catalog": {"name": "default__get_catalog", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__get_catalog", "macro_sql": "{% macro default__get_catalog(information_schema, schemas) -%}\n\n {% set typename = adapter.type() %}\n {% set msg -%}\n get_catalog not implemented for {{ typename }}\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.16497, "supported_languages": null}, "macro.dbt.information_schema_name": {"name": "information_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.information_schema_name", "macro_sql": "{% macro 
information_schema_name(database) %}\n {{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__information_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.165441, "supported_languages": null}, "macro.dbt.default__information_schema_name": {"name": "default__information_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__information_schema_name", "macro_sql": "{% macro default__information_schema_name(database) -%}\n {%- if database -%}\n {{ database }}.INFORMATION_SCHEMA\n {%- else -%}\n INFORMATION_SCHEMA\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.166005, "supported_languages": null}, "macro.dbt.list_schemas": {"name": "list_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.list_schemas", "macro_sql": "{% macro list_schemas(database) -%}\n {{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.166465, "supported_languages": null}, "macro.dbt.default__list_schemas": {"name": "default__list_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__list_schemas", "macro_sql": "{% macro default__list_schemas(database) -%}\n {% set sql %}\n select distinct schema_name\n from {{ information_schema_name(database) }}.SCHEMATA\n where catalog_name ilike '{{ database }}'\n {% endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.information_schema_name", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.167077, "supported_languages": null}, "macro.dbt.check_schema_exists": {"name": "check_schema_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.check_schema_exists", "macro_sql": "{% macro check_schema_exists(information_schema, schema) -%}\n {{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__check_schema_exists"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.167588, "supported_languages": null}, "macro.dbt.default__check_schema_exists": {"name": "default__check_schema_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__check_schema_exists", "macro_sql": "{% macro default__check_schema_exists(information_schema, schema) -%}\n 
{% set sql -%}\n select count(*)\n from {{ information_schema.replace(information_schema_view='SCHEMATA') }}\n where catalog_name='{{ information_schema.database }}'\n and schema_name='{{ schema }}'\n {%- endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.replace", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.168344, "supported_languages": null}, "macro.dbt.list_relations_without_caching": {"name": "list_relations_without_caching", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.list_relations_without_caching", "macro_sql": "{% macro list_relations_without_caching(schema_relation) %}\n {{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_relations_without_caching"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.168813, "supported_languages": null}, "macro.dbt.default__list_relations_without_caching": {"name": "default__list_relations_without_caching", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__list_relations_without_caching", "macro_sql": "{% macro default__list_relations_without_caching(schema_relation) %}\n {{ exceptions.raise_not_implemented(\n 'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.169232, "supported_languages": null}, "macro.dbt.get_columns_in_relation": {"name": "get_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.get_columns_in_relation", "macro_sql": "{% macro get_columns_in_relation(relation) -%}\n {{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.172499, "supported_languages": null}, "macro.dbt.default__get_columns_in_relation": {"name": "default__get_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__get_columns_in_relation", "macro_sql": "{% macro default__get_columns_in_relation(relation) -%}\n {{ exceptions.raise_not_implemented(\n 'get_columns_in_relation macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.172908, "supported_languages": null}, "macro.dbt.sql_convert_columns_in_relation": {"name": "sql_convert_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.sql_convert_columns_in_relation", "macro_sql": "{% macro sql_convert_columns_in_relation(table) -%}\n {% set columns = [] %}\n {% for row in table %}\n {% do columns.append(api.Column(*row)) %}\n {% endfor %}\n {{ return(columns) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.173693, "supported_languages": null}, "macro.dbt.get_columns_in_query": {"name": "get_columns_in_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.get_columns_in_query", "macro_sql": "{% macro get_columns_in_query(select_sql) -%}\n {{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.174159, "supported_languages": null}, "macro.dbt.default__get_columns_in_query": {"name": "default__get_columns_in_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__get_columns_in_query", "macro_sql": "{% macro default__get_columns_in_query(select_sql) %}\n {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%}\n select * from (\n {{ select_sql }}\n ) as __dbt_sbq\n where false\n limit 0\n {% endcall %}\n\n {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.175012, "supported_languages": null}, "macro.dbt.alter_column_type": {"name": "alter_column_type", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.alter_column_type", "macro_sql": "{% macro alter_column_type(relation, column_name, new_column_type) -%}\n {{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1755981, "supported_languages": null}, "macro.dbt.default__alter_column_type": {"name": "default__alter_column_type", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__alter_column_type", "macro_sql": "{% macro default__alter_column_type(relation, column_name, new_column_type) -%}\n {#\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. 
Rename the new column to existing column\n #}\n {%- set tmp_column = column_name + \"__dbt_alter\" -%}\n\n {% call statement('alter_column_type') %}\n alter table {{ relation }} add column {{ adapter.quote(tmp_column) }} {{ new_column_type }};\n update {{ relation }} set {{ adapter.quote(tmp_column) }} = {{ adapter.quote(column_name) }};\n alter table {{ relation }} drop column {{ adapter.quote(column_name) }} cascade;\n alter table {{ relation }} rename column {{ adapter.quote(tmp_column) }} to {{ adapter.quote(column_name) }}\n {% endcall %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.177139, "supported_languages": null}, "macro.dbt.alter_relation_add_remove_columns": {"name": "alter_relation_add_remove_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.alter_relation_add_remove_columns", "macro_sql": "{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}\n {{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__alter_relation_add_remove_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.177797, "supported_languages": null}, "macro.dbt.default__alter_relation_add_remove_columns": {"name": "default__alter_relation_add_remove_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__alter_relation_add_remove_columns", "macro_sql": "{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}\n\n {% if add_columns is none %}\n {% set add_columns = [] %}\n {% endif %}\n {% if remove_columns is none %}\n {% set remove_columns = [] %}\n {% endif %}\n\n {% set sql -%}\n\n alter {{ relation.type }} {{ relation }}\n\n {% for column in add_columns %}\n add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}{{ ',' if add_columns and remove_columns }}\n\n {% for column in remove_columns %}\n drop column {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.179789, "supported_languages": null}, "macro.dbt.build_ref_function": {"name": "build_ref_function", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_ref_function", "macro_sql": "{% macro build_ref_function(model) %}\n\n {%- set ref_dict = {} -%}\n {%- for _ref in model.refs -%}\n {%- set resolved = ref(*_ref) -%}\n {%- do ref_dict.update({_ref | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef ref(*args,dbt_load_df_function):\n refs = {{ ref_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(refs[key])\n\n{% endmacro %}", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.182821, "supported_languages": null}, "macro.dbt.build_source_function": {"name": "build_source_function", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_source_function", "macro_sql": "{% macro build_source_function(model) %}\n\n {%- set source_dict = {} -%}\n {%- for _source in model.sources -%}\n {%- set resolved = source(*_source) -%}\n {%- do source_dict.update({_source | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef source(*args, dbt_load_df_function):\n sources = {{ source_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(sources[key])\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.183993, "supported_languages": null}, "macro.dbt.build_config_dict": {"name": "build_config_dict", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_config_dict", "macro_sql": "{% macro build_config_dict(model) %}\n {%- set config_dict = {} -%}\n {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %}\n {%- for key, default in config_dbt_used -%}\n {# weird type testing with enum, would be much easier to write this logic in Python! #}\n {%- if key == 'language' -%}\n {%- set value = 'python' -%}\n {%- endif -%}\n {%- set value = model.config.get(key, default) -%}\n {%- do config_dict.update({key: value}) -%}\n {%- endfor -%}\nconfig_dict = {{ config_dict }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.185586, "supported_languages": null}, "macro.dbt.py_script_postfix": {"name": "py_script_postfix", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.py_script_postfix", "macro_sql": "{% macro py_script_postfix(model) %}\n# This part is user provided model code\n# you will need to copy the next section to run the code\n# COMMAND ----------\n# this part is dbt logic for get ref work, do not modify\n\n{{ build_ref_function(model ) }}\n{{ build_source_function(model ) }}\n{{ build_config_dict(model) }}\n\nclass config:\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def get(key, default=None):\n return config_dict.get(key, default)\n\nclass this:\n \"\"\"dbt.this() or dbt.this.identifier\"\"\"\n database = '{{ this.database }}'\n schema = '{{ this.schema }}'\n identifier = '{{ this.identifier }}'\n def __repr__(self):\n return '{{ this }}'\n\n\nclass dbtObj:\n def __init__(self, load_df_function) -> None:\n self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)\n self.ref = lambda *args: ref(*args, dbt_load_df_function=load_df_function)\n self.config = config\n self.this = this()\n self.is_incremental = {{ is_incremental() }}\n\n# COMMAND ----------\n{{py_script_comment()}}\n{% endmacro %}", "depends_on": {"macros": 
["macro.dbt.build_ref_function", "macro.dbt.build_source_function", "macro.dbt.build_config_dict", "macro.dbt.is_incremental", "macro.dbt.py_script_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.186578, "supported_languages": null}, "macro.dbt.py_script_comment": {"name": "py_script_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.py_script_comment", "macro_sql": "{%macro py_script_comment()%}\n{%endmacro%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1867762, "supported_languages": null}, "macro.dbt.test_unique": {"name": "test_unique", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_unique", "macro_sql": "{% test unique(model, column_name) %}\n {% set macro = adapter.dispatch('test_unique', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_unique"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1879969, "supported_languages": null}, "macro.dbt.test_not_null": {"name": "test_not_null", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_not_null", "macro_sql": "{% test not_null(model, column_name) %}\n {% set macro = adapter.dispatch('test_not_null', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_not_null"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.188813, "supported_languages": null}, "macro.dbt.test_accepted_values": {"name": "test_accepted_values", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_accepted_values", "macro_sql": "{% test accepted_values(model, column_name, values, quote=True) %}\n {% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}\n {{ macro(model, column_name, values, quote) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_accepted_values"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.189566, "supported_languages": null}, "macro.dbt.test_relationships": {"name": "test_relationships", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_relationships", "macro_sql": "{% test relationships(model, column_name, to, field) %}\n {% set macro = adapter.dispatch('test_relationships', 'dbt') %}\n {{ macro(model, column_name, to, field) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_relationships"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1902661, "supported_languages": null}}, "docs": {"doc.dbt.__overview__": {"name": 
"__overview__", "resource_type": "doc", "package_name": "dbt", "path": "overview.md", "original_file_path": "docs/overview.md", "unique_id": "doc.dbt.__overview__", "block_contents": "### Welcome!\n\nWelcome to the auto-generated documentation for your dbt project!\n\n### Navigation\n\nYou can use the `Project` and `Database` navigation tabs on the left side of the window to explore the models\nin your project.\n\n#### Project Tab\nThe `Project` tab mirrors the directory structure of your dbt project. In this tab, you can see all of the\nmodels defined in your dbt project, as well as models imported from dbt packages.\n\n#### Database Tab\nThe `Database` tab also exposes your models, but in a format that looks more like a database explorer. This view\nshows relations (tables and views) grouped into database schemas. Note that ephemeral models are _not_ shown\nin this interface, as they do not exist in the database.\n\n### Graph Exploration\nYou can click the blue icon on the bottom-right corner of the page to view the lineage graph of your models.\n\nOn model pages, you'll see the immediate parents and children of the model you're exploring. By clicking the `Expand`\nbutton at the top-right of this lineage pane, you'll be able to see all of the models that are used to build,\nor are built from, the model you're exploring.\n\nOnce expanded, you'll be able to use the `--select` and `--exclude` model selection syntax to filter the\nmodels in the graph. For more information on model selection, check out the [dbt docs](https://docs.getdbt.com/docs/model-selection-syntax).\n\nNote that you can also right-click on models to interactively filter and explore the graph.\n\n---\n\n### More information\n\n- [What is dbt](https://docs.getdbt.com/docs/introduction)?\n- Read the [dbt viewpoint](https://docs.getdbt.com/docs/viewpoint)\n- [Installation](https://docs.getdbt.com/docs/installation)\n- Join the [dbt Community](https://www.getdbt.com/community/) for questions and discussion"}}, "exposures": {}, "metrics": {"metric.test.my_metric": {"name": "my_metric", "resource_type": "metric", "package_name": "test", "path": "metric.yml", "original_file_path": "models/metric.yml", "unique_id": "metric.test.my_metric", "fqn": ["test", "my_metric"], "description": "", "label": "Count records", "calculation_method": "count", "timestamp": "updated_at", "expression": "*", "filters": [], "time_grains": ["day"], "dimensions": [], "window": null, "model": "ref('my_model')", "model_unique_id": null, "meta": {}, "tags": [], "config": {"enabled": true}, "unrendered_config": {}, "sources": [], "depends_on": {"macros": [], "nodes": ["model.test.my_model"]}, "refs": [["my_model"]], "metrics": [], "created_at": 1670853278.56334}}, "selectors": {}, "disabled": {}, "parent_map": {"model.test.my_model": [], "metric.test.my_metric": ["model.test.my_model"]}, "child_map": {"model.test.my_model": ["metric.test.my_metric"], "metric.test.my_metric": []}} diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 32c9dcfbfa1..51a6b633e40 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -94,7 +94,9 @@ def get_rendered_snapshot_config(**updates): "strategy": "check", "check_cols": "all", "unique_key": "id", + "target_database": None, "target_schema": None, + "updated_at": None, "meta": {}, "grants": {}, "packages": [], @@ -241,7 +243,6 @@ def expected_seeded_manifest(project, model_database=None, 
quote_model=False): "build_path": None, "created_at": ANY, "name": "model", - "root_path": project.project_root, "relation_name": relation_name_node_format.format( model_database, my_schema_name, "model" ), @@ -321,7 +322,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "build_path": None, "created_at": ANY, "name": "second_model", - "root_path": project.project_root, "relation_name": relation_name_node_format.format( project.database, alternate_schema, "second_model" ), @@ -399,30 +399,19 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "unrendered_config": unrendered_second_config, }, "seed.test.seed": { - "compiled_path": None, "build_path": None, "created_at": ANY, - "compiled": True, - "compiled_code": "", "config": seed_config, "patch_path": "test://" + seed_schema_yml_path, "path": "seed.csv", "name": "seed", "root_path": project.project_root, - "relation_name": relation_name_node_format.format( - project.database, my_schema_name, "seed" - ), "resource_type": "seed", "raw_code": "", - "language": "sql", "package_name": "test", "original_file_path": seed_path, - "refs": [], - "sources": [], - "depends_on": {"nodes": [], "macros": []}, "unique_id": "seed.test.seed", "fqn": ["test", "seed"], - "metrics": [], "tags": [], "meta": {}, "schema": my_schema_name, @@ -473,12 +462,11 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): }, }, "docs": {"node_color": None, "show": True}, - "compiled": True, - "compiled_code": "", - "extra_ctes_injected": True, - "extra_ctes": [], "checksum": checksum_file(seed_path), "unrendered_config": unrendered_seed_config, + "relation_name": relation_name_node_format.format( + project.database, my_schema_name, "seed" + ), }, "test.test.not_null_model_id.d01cc630e6": { "alias": "not_null_model_id", @@ -510,7 +498,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -571,7 +558,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): project.database, alternate_schema, "snapshot_seed" ), "resource_type": "snapshot", - "root_path": project.project_root, "schema": alternate_schema, "sources": [], "tags": [], @@ -608,7 +594,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -659,7 +644,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -725,7 +709,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): project.database, my_schema_name, "seed" ), "resource_type": "source", - "root_path": project.project_root, "schema": my_schema_name, "source_description": "My source", "source_name": "my_source", @@ -751,6 +734,7 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "fqn": ["test", "notebook_exposure"], "maturity": "medium", "meta": {"tool": "my_tool", "languages": ["python"]}, + "metrics": [], "tags": ["my_department"], "name": "notebook_exposure", 
"original_file_path": os.path.join("models", "schema.yml"), @@ -759,7 +743,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "path": "schema.yml", "refs": [["model"], ["second_model"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [], "type": "notebook", "unique_id": "exposure.test.notebook_exposure", @@ -778,6 +761,7 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "enabled": True, }, "fqn": ["test", "simple_exposure"], + "metrics": [], "name": "simple_exposure", "original_file_path": os.path.join("models", "schema.yml"), "owner": { @@ -788,7 +772,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "path": "schema.yml", "refs": [["model"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [["my_source", "my_table"]], "type": "dashboard", "unique_id": "exposure.test.simple_exposure", @@ -839,9 +822,9 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "test.test.unique_model_id.67b76558ff": [], }, "docs": { - "dbt.__overview__": ANY, - "test.macro_info": ANY, - "test.macro_arg_info": ANY, + "doc.dbt.__overview__": ANY, + "doc.test.macro_info": ANY, + "doc.test.macro_arg_info": ANY, }, "disabled": {}, } @@ -892,7 +875,6 @@ def expected_references_manifest(project): "refs": [], "relation_name": None, "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "database": project.database, "tags": [], @@ -948,7 +930,6 @@ def expected_references_manifest(project): model_database, my_schema_name ), "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "database": project.database, "tags": [], @@ -1002,7 +983,6 @@ def expected_references_manifest(project): "refs": [["ephemeral_summary"]], "relation_name": '"{0}"."{1}".view_summary'.format(model_database, my_schema_name), "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "sources": [], "tags": [], @@ -1017,7 +997,6 @@ def expected_references_manifest(project): }, "seed.test.seed": { "alias": "seed", - "compiled_path": None, "build_path": None, "created_at": ANY, "columns": { @@ -1063,22 +1042,16 @@ def expected_references_manifest(project): }, }, "config": get_rendered_seed_config(), - "sources": [], - "depends_on": {"macros": [], "nodes": []}, "deferred": False, "description": "The test seed", "docs": {"node_color": None, "show": True}, "fqn": ["test", "seed"], - "metrics": [], "name": "seed", "original_file_path": seed_path, "package_name": "test", "patch_path": "test://" + os.path.join("seeds", "schema.yml"), "path": "seed.csv", "raw_code": "", - "language": "sql", - "refs": [], - "relation_name": '"{0}"."{1}".seed'.format(model_database, my_schema_name), "resource_type": "seed", "root_path": project.project_root, "schema": my_schema_name, @@ -1086,12 +1059,11 @@ def expected_references_manifest(project): "tags": [], "meta": {}, "unique_id": "seed.test.seed", - "compiled": True, - "compiled_code": "", - "extra_ctes_injected": True, - "extra_ctes": [], "checksum": checksum_file(seed_path), "unrendered_config": get_unrendered_seed_config(), + "relation_name": '"{0}"."{1}".seed'.format( + project.database, my_schema_name + ), }, "snapshot.test.snapshot_seed": { "alias": "snapshot_seed", @@ -1125,7 +1097,6 @@ def expected_references_manifest(project): model_database, alternate_schema ), "resource_type": "snapshot", - "root_path": project.project_root, "schema": 
alternate_schema, "sources": [], "tags": [], @@ -1176,7 +1147,6 @@ def expected_references_manifest(project): "patch_path": None, "relation_name": '{0}."{1}"."seed"'.format(project.database, my_schema_name), "resource_type": "source", - "root_path": project.project_root, "schema": my_schema_name, "source_description": "My source", "source_name": "my_source", @@ -1199,6 +1169,7 @@ def expected_references_manifest(project): "fqn": ["test", "notebook_exposure"], "maturity": "medium", "meta": {"tool": "my_tool", "languages": ["python"]}, + "metrics": [], "tags": ["my_department"], "name": "notebook_exposure", "original_file_path": os.path.join("models", "schema.yml"), @@ -1207,7 +1178,6 @@ def expected_references_manifest(project): "path": "schema.yml", "refs": [["view_summary"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [], "type": "notebook", "unique_id": "exposure.test.notebook_exposure", @@ -1218,98 +1188,98 @@ def expected_references_manifest(project): "metrics": {}, "selectors": {}, "docs": { - "dbt.__overview__": ANY, - "test.column_info": { + "doc.dbt.__overview__": ANY, + "doc.test.column_info": { "block_contents": "An ID field", + "resource_type": "doc", "name": "column_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.column_info", + "unique_id": "doc.test.column_info", }, - "test.ephemeral_summary": { + "doc.test.ephemeral_summary": { "block_contents": ("A summmary table of the ephemeral copy of the seed data"), + "resource_type": "doc", "name": "ephemeral_summary", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.ephemeral_summary", + "unique_id": "doc.test.ephemeral_summary", }, - "test.source_info": { + "doc.test.source_info": { "block_contents": "My source", + "resource_type": "doc", "name": "source_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.source_info", + "unique_id": "doc.test.source_info", }, - "test.summary_count": { + "doc.test.summary_count": { "block_contents": "The number of instances of the first name", + "resource_type": "doc", "name": "summary_count", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.summary_count", + "unique_id": "doc.test.summary_count", }, - "test.summary_first_name": { + "doc.test.summary_first_name": { "block_contents": "The first name being summarized", + "resource_type": "doc", "name": "summary_first_name", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.summary_first_name", + "unique_id": "doc.test.summary_first_name", }, - "test.table_info": { + "doc.test.table_info": { "block_contents": "My table", + "resource_type": "doc", "name": "table_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.table_info", + "unique_id": "doc.test.table_info", }, - "test.view_summary": { + "doc.test.view_summary": { "block_contents": ( "A view of the summary of the ephemeral copy of the seed data" ), + "resource_type": "doc", "name": "view_summary", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.view_summary", + 
"unique_id": "doc.test.view_summary", }, - "test.macro_info": { + "doc.test.macro_info": { "block_contents": "My custom test that I wrote that does nothing", + "resource_type": "doc", "name": "macro_info", "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "root_path": project.project_root, - "unique_id": "test.macro_info", + "unique_id": "doc.test.macro_info", }, - "test.notebook_info": { + "doc.test.notebook_info": { "block_contents": "A description of the complex exposure", + "resource_type": "doc", "name": "notebook_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.notebook_info", + "unique_id": "doc.test.notebook_info", }, - "test.macro_arg_info": { + "doc.test.macro_arg_info": { "block_contents": "The model for my custom test", + "resource_type": "doc", "name": "macro_arg_info", "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "root_path": project.project_root, - "unique_id": "test.macro_arg_info", + "unique_id": "doc.test.macro_arg_info", }, }, "child_map": { @@ -1348,8 +1318,6 @@ def expected_references_manifest(project): "patch_path": "test://" + os.path.join("macros", "schema.yml"), "resource_type": "macro", "unique_id": "macro.test.test_nothing", - "tags": [], - "root_path": project.project_root, "supported_languages": None, "arguments": [ { diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py index c835e5a001c..a7a7ed5417c 100644 --- a/tests/functional/artifacts/test_previous_version_state.py +++ b/tests/functional/artifacts/test_previous_version_state.py @@ -42,7 +42,7 @@ class TestPreviousVersionState: - CURRENT_EXPECTED_MANIFEST_VERSION = 7 + CURRENT_EXPECTED_MANIFEST_VERSION = 8 @pytest.fixture(scope="class") def models(self): diff --git a/tests/functional/colors/test_colors.py b/tests/functional/colors/test_colors.py new file mode 100644 index 00000000000..7e92e039506 --- /dev/null +++ b/tests/functional/colors/test_colors.py @@ -0,0 +1,43 @@ +import pytest +import re +from dbt.tests.util import run_dbt_and_capture + + +models__do_nothing_then_fail_sql = """ +select 1, + +""" + + +@pytest.fixture(scope="class") +def models(): + return {"do_nothing_then_fail.sql": models__do_nothing_then_fail_sql} + + +@pytest.fixture(scope="class") +def project_config_update(): + return {'config-version': 2} + + +class TestColors: + def test_use_colors(self, project): + self.assert_colors_used( + "--use-colors", + expect_colors=True, + ) + + def test_no_use_colors(self, project): + self.assert_colors_used( + "--no-use-colors", + expect_colors=False, + ) + + def assert_colors_used(self, flag, expect_colors): + _, stdout = run_dbt_and_capture(args=[flag, "run"], expect_pass=False) + # pattern to match formatted log output + pattern = re.compile(r"\[31m.*|\[33m.*") + stdout_contains_formatting_characters = bool(pattern.search(stdout)) + if expect_colors: + assert stdout_contains_formatting_characters + else: + assert not stdout_contains_formatting_characters diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 68501c146f9..e2f416d2fb4 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -112,15 +112,15 @@ def 
test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = "'send_anonymous_usage_stats': False, 'event_buffer_size': 100000, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'anonymous_usage_stats': True, 'indirect_selection': 'eager'}" + expected = "'send_anonymous_usage_stats': False, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'anonymous_usage_stats': True, 'indirect_selection': 'eager'}" assert expected in str(result) def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): envs = { - "DBT_ENV_CUSTOM_ENV_RUN_ID": 1234, - "DBT_ENV_CUSTOM_ENV_JOB_ID": 5678, - "DBT_ENV_RUN_ID": 91011, - "RANDOM_ENV": 121314, + "DBT_ENV_CUSTOM_ENV_RUN_ID": "1234", + "DBT_ENV_CUSTOM_ENV_JOB_ID": "5678", + "DBT_ENV_RUN_ID": "91011", + "RANDOM_ENV": "121314", } monkeypatch.setattr(os, "environ", envs) @@ -133,7 +133,7 @@ def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): assert result - expected = "dbt_metadata_envs_result:{'RUN_ID': 1234, 'JOB_ID': 5678}" + expected = "dbt_metadata_envs_result:{'RUN_ID': '1234', 'JOB_ID': '5678'}" assert expected in str(result) diff --git a/tests/functional/duplicates/test_duplicate_model.py b/tests/functional/duplicates/test_duplicate_model.py index 031ba6236c0..fbcd1b79671 100644 --- a/tests/functional/duplicates/test_duplicate_model.py +++ b/tests/functional/duplicates/test_duplicate_model.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DuplicateResourceName from dbt.tests.fixtures.project import write_project_files from dbt.tests.util import run_dbt, get_manifest @@ -108,7 +108,7 @@ def packages(self): def test_duplicate_model_enabled_across_packages(self, project): run_dbt(["deps"]) message = "dbt found two models with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(DuplicateResourceName) as exc: run_dbt(["run"]) assert message in str(exc.value) diff --git a/tests/functional/exit_codes/fixtures.py b/tests/functional/exit_codes/fixtures.py new file mode 100644 index 00000000000..23a0bef3897 --- /dev/null +++ b/tests/functional/exit_codes/fixtures.py @@ -0,0 +1,78 @@ +import pytest + +bad_sql = """ +select bad sql here +""" + +dupe_sql = """ +select 1 as id, current_date as updated_at +union all +select 2 as id, current_date as updated_at +union all +select 3 as id, current_date as updated_at +union all +select 4 as id, current_date as updated_at +""" + +good_sql = """ +select 1 as id, current_date as updated_at +union all +select 2 as id, current_date as updated_at +union all +select 3 as id, current_date as updated_at +union all +select 4 as id, current_date as updated_at +""" + +snapshots_good_sql = """ +{% snapshot good_snapshot %} + {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} + select * from {{ schema }}.good +{% endsnapshot %} +""" + +snapshots_bad_sql = """ +{% snapshot good_snapshot %} + {{ config(target_schema=schema, target_database=database, 
strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}} + select * from {{ schema }}.good +{% endsnapshot %} +""" + +schema_yml = """ +version: 2 +models: +- name: good + columns: + - name: updated_at + tests: + - not_null +- name: bad + columns: + - name: updated_at + tests: + - not_null +- name: dupe + columns: + - name: updated_at + tests: + - unique +""" + +data_seed_good_csv = """a,b,c +1,2,3 +""" + +data_seed_bad_csv = """a,b,c +1,\2,3,a,a,a +""" + + +class BaseConfigProject: + @pytest.fixture(scope="class") + def models(self): + return { + "bad.sql": bad_sql, + "dupe.sql": dupe_sql, + "good.sql": good_sql, + "schema.yml": schema_yml + } diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py new file mode 100644 index 00000000000..54b5cb6865e --- /dev/null +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -0,0 +1,124 @@ +import pytest + +import dbt.exceptions +from dbt.tests.util import ( + check_table_does_exist, + check_table_does_not_exist, + run_dbt +) +from tests.functional.exit_codes.fixtures import ( + BaseConfigProject, + snapshots_bad_sql, + snapshots_good_sql, + data_seed_bad_csv, + data_seed_good_csv +) + + +class TestExitCodes(BaseConfigProject): + @pytest.fixture(scope="class") + def snapshots(self): + return {"g.sql": snapshots_good_sql} + + def test_exit_code_run_succeed(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + check_table_does_exist(project.adapter, 'good') + + def test_exit_code_run_fail(self, project): + results = run_dbt(['run', '--model', 'bad'], expect_pass=False) + assert len(results) == 1 + check_table_does_not_exist(project.adapter, 'bad') + + def test_schema_test_pass(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + + results = run_dbt(['test', '--model', 'good']) + assert len(results) == 1 + + def test_schema_test_fail(self, project): + results = run_dbt(['run', '--model', 'dupe']) + assert len(results) == 1 + + results = run_dbt(['test', '--model', 'dupe'], expect_pass=False) + assert len(results) == 1 + + def test_compile(self, project): + results = run_dbt(['compile']) + assert len(results) == 7 + + def test_snapshot_pass(self, project): + run_dbt(["run", "--model", "good"]) + results = run_dbt(['snapshot']) + assert len(results) == 1 + check_table_does_exist(project.adapter, 'good_snapshot') + + +class TestExitCodesSnapshotFail(BaseConfigProject): + @pytest.fixture(scope="class") + def snapshots(self): + return {"b.sql": snapshots_bad_sql} + + def test_snapshot_fail(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + + results = run_dbt(['snapshot'], expect_pass=False) + assert len(results) == 1 + check_table_does_not_exist(project.adapter, 'good_snapshot') + + +class TestExitCodesDeps: + @pytest.fixture(scope="class") + def packages(self): + return { + "packages": [ + { + 'git': 'https://github.com/dbt-labs/dbt-integration-project', + 'revision': 'dbt/1.0.0', + } + ] + } + + def test_deps(self, project): + results = run_dbt(['deps']) + assert results is None + + +class TestExitCodesDepsFail: + @pytest.fixture(scope="class") + def packages(self): + return { + "packages": [ + { + 'git': 'https://github.com/dbt-labs/dbt-integration-project', + 'revision': 'bad-branch', + }, + ] + } + + def test_deps_fail(self, project): + with pytest.raises(dbt.exceptions.GitCheckoutError) as exc: + run_dbt(['deps']) + expected_msg = "Error checking out 
spec='bad-branch'" + assert expected_msg in str(exc.value) + + +class TestExitCodesSeed: + @pytest.fixture(scope="class") + def seeds(self): + return {"good.csv": data_seed_good_csv} + + def test_seed(self, project): + results = run_dbt(['seed']) + assert len(results) == 1 + + +class TestExitCodesSeedFail: + @pytest.fixture(scope="class") + def seeds(self): + return {"bad.csv": data_seed_bad_csv} + + def test_seed(self, project): + run_dbt(['seed'], expect_pass=False) diff --git a/tests/functional/exposures/fixtures.py b/tests/functional/exposures/fixtures.py index 847a3cf5f73..1d573b1a7b6 100644 --- a/tests/functional/exposures/fixtures.py +++ b/tests/functional/exposures/fixtures.py @@ -7,6 +7,29 @@ select 1 as id """ + +source_schema_yml = """version: 2 + +sources: + - name: test_source + tables: + - name: test_table +""" + +metrics_schema_yml = """version: 2 + +metrics: + - name: metric + model: ref('model') + label: "label" + + calculation_method: count_distinct + expression: id + + timestamp: first_order + time_grains: [day] +""" + simple_exposure_yml = """ version: 2 @@ -16,6 +39,8 @@ type: dashboard depends_on: - ref('model') + - source('test_source', 'test_table') + - metric('metric') owner: email: something@example.com - name: notebook_exposure diff --git a/tests/functional/exposures/test_exposure_configs.py b/tests/functional/exposures/test_exposure_configs.py index ed49f565ec7..a7018204952 100644 --- a/tests/functional/exposures/test_exposure_configs.py +++ b/tests/functional/exposures/test_exposure_configs.py @@ -10,7 +10,9 @@ simple_exposure_yml, disabled_models_exposure_yml, enabled_yaml_level_exposure_yml, - invalid_config_exposure_yml + invalid_config_exposure_yml, + source_schema_yml, + metrics_schema_yml ) @@ -29,7 +31,9 @@ def models(self): return { "model.sql": models_sql, "second_model.sql": second_model_sql, - "schema.yml": simple_exposure_yml, + "exposure.yml": simple_exposure_yml, + "schema.yml": source_schema_yml, + "metrics.yml": metrics_schema_yml, } @pytest.fixture(scope="class") diff --git a/tests/functional/exposures/test_exposures.py b/tests/functional/exposures/test_exposures.py index 52ff74d4b0c..777a8e161c4 100644 --- a/tests/functional/exposures/test_exposures.py +++ b/tests/functional/exposures/test_exposures.py @@ -5,6 +5,8 @@ models_sql, second_model_sql, simple_exposure_yml, + source_schema_yml, + metrics_schema_yml ) @@ -15,6 +17,8 @@ def models(self): "exposure.yml": simple_exposure_yml, "model.sql": models_sql, "second_model.sql": second_model_sql, + "schema.yml": source_schema_yml, + "metrics.yml": metrics_schema_yml, } def test_names_with_spaces(self, project): @@ -27,3 +31,14 @@ def test_names_with_spaces(self, project): ] assert exposure_ids == expected_exposure_ids assert manifest.exposures["exposure.test.simple_exposure"].label == "simple exposure label" + + def test_depends_on(self, project): + run_dbt(["run"]) + manifest = get_manifest(project.project_root) + exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes + expected_exposure_depends_on = [ + 'source.test.test_source.test_table', + 'model.test.model', + 'metric.test.metric' + ] + assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on) diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/functional/incremental_schema_tests/fixtures.py new file mode 100644 index 00000000000..c6eebc5e183 --- /dev/null +++ b/tests/functional/incremental_schema_tests/fixtures.py @@ -0,0 +1,395 @@ + +# +# Properties +# 
+_PROPERTIES__SCHEMA = """
+version: 2
+
+models:
+  - name: model_a
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+
+  - name: incremental_ignore
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+
+  - name: incremental_ignore_target
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+
+  - name: incremental_append_new_columns
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+
+  - name: incremental_append_new_columns_target
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+
+  - name: incremental_sync_all_columns
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+
+  - name: incremental_sync_all_columns_target
+    columns:
+      - name: id
+        tags: [column_level_tag]
+        tests:
+          - unique
+"""
+
+#
+# Models
+#
+_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY = """
+{{
+    config(
+        materialized='incremental',
+        unique_key='id',
+        on_schema_change='sync_all_columns'
+
+    )
+}}
+
+WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
+
+{% set string_type = 'varchar(10)' %}
+
+{% if is_incremental() %}
+
+SELECT id,
+       cast(field1 as {{string_type}}) as field1
+
+FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
+
+{% else %}
+
+select id,
+       cast(field1 as {{string_type}}) as field1,
+       cast(field2 as {{string_type}}) as field2
+
+from source_data where id <= 3
+
+{% endif %}
+"""
+
+_MODELS__INCREMENTAL_IGNORE = """
+{{
+    config(
+        materialized='incremental',
+        unique_key='id',
+        on_schema_change='ignore'
+    )
+}}
+
+WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
+
+{% if is_incremental() %}
+
+SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
+
+{% else %}
+
+SELECT id, field1, field2 FROM source_data LIMIT 3
+
+{% endif %}
+"""
+
+_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET = """
+{{
+    config(materialized='table')
+}}
+
+with source_data as (
+
+    select * from {{ ref('model_a') }}
+
+)
+
+{% set string_type = 'varchar(10)' %}
+
+select id
+       ,cast(field1 as {{string_type}}) as field1
+
+from source_data
+order by id
+"""
+
+_MODELS__INCREMENTAL_IGNORE_TARGET = """
+{{
+    config(materialized='table')
+}}
+
+with source_data as (
+
+    select * from {{ ref('model_a') }}
+
+)
+
+select id
+       ,field1
+       ,field2
+
+from source_data
+"""
+
+_MODELS__INCREMENTAL_FAIL = """
+{{
+    config(
+        materialized='incremental',
+        unique_key='id',
+        on_schema_change='fail'
+    )
+}}
+
+WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
+
+{% if is_incremental() %}
+
+SELECT id, field1, field2 FROM source_data
+
+{% else %}
+
+SELECT id, field1, field3 FROM source_data
+
+{% endif %}
+"""
+
+_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS = """
+{{
+    config(
+        materialized='incremental',
+        unique_key='id',
+        on_schema_change='sync_all_columns'
+
+    )
+}}
+
+WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
+
+{% set string_type = 'varchar(10)' %}
+
+{% if is_incremental() %}
+
+SELECT id,
+       cast(field1 as {{string_type}}) as field1,
+       cast(field3 as {{string_type}}) as field3, -- to validate new fields
+       cast(field4 as {{string_type}}) AS field4 -- to validate new fields
+
+FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
+
+{% else %}
+
+select id,
+       cast(field1 as {{string_type}}) as field1,
+       cast(field2 as {{string_type}}) as field2
+
+from source_data where id <= 3
+
+{% endif %}
+"""
+
+_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE = """
+{{
+    config(
+        materialized='incremental',
+        unique_key='id',
on_schema_change='append_new_columns' + ) +}} + +{% set string_type = 'varchar(10)' %} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field3 as {{string_type}}) as field3, + cast(field4 as {{string_type}}) as field4 +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 +FROM source_data where id <= 3 + +{% endif %} +""" + +_MODELS__A = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4 + union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4 + union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4 + union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4 + union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 as field3, 'XXX' as field4 + union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4 + +) + +select id + ,field1 + ,field2 + ,field3 + ,field4 + +from source_data +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ +{{ + config(materialized='table') +}} + +{% set string_type = 'varchar(10)' %} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id + ,cast(field1 as {{string_type}}) as field1 + ,cast(field2 as {{string_type}}) as field2 + ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3 + ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 + +from source_data +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='append_new_columns' + ) +}} + +{% set string_type = 'varchar(10)' %} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2, + cast(field3 as {{string_type}}) as field3, + cast(field4 as {{string_type}}) as field4 +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 +FROM source_data where id <= 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +{% set string_type = 'varchar(10)' %} + +select id + ,cast(field1 as {{string_type}}) as field1 + --,field2 + ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3 + ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4 + +from source_data +order by id +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET = """ +{{ + config(materialized='table') +}} + +{% set string_type = 'varchar(10)' %} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id, + cast(field1 as {{string_type}}) as field1, + cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2, + cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3, + cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 + +from source_data +""" + +# +# Tests +# + 
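+# The singular tests below all end in `where false`: a singular test passes
+# when it returns zero rows, so each one exists only to confirm that the
+# relation named in ref() is present and selectable after the schema change.
+# Illustrative shape only, with a hypothetical relation name:
+#
+#   select * from {{ ref('some_relation') }} where false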
+_TESTS__SELECT_FROM_INCREMENTAL_IGNORE = """ +select * from {{ ref('incremental_ignore') }} where false +""" + +_TESTS__SELECT_FROM_A = """ +select * from {{ ref('model_a') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ +select * from {{ ref('incremental_append_new_columns_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS = """ +select * from {{ ref('incremental_sync_all_columns') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ +select * from {{ ref('incremental_sync_all_columns_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET = """ +select * from {{ ref('incremental_ignore_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS = """ +select * from {{ ref('incremental_append_new_columns') }} where false +""" diff --git a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/functional/incremental_schema_tests/test_incremental_schema.py new file mode 100644 index 00000000000..3ee9e6477e4 --- /dev/null +++ b/tests/functional/incremental_schema_tests/test_incremental_schema.py @@ -0,0 +1,136 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from tests.functional.incremental_schema_tests.fixtures import ( + _PROPERTIES__SCHEMA, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + _MODELS__INCREMENTAL_IGNORE, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + _MODELS__INCREMENTAL_IGNORE_TARGET, + _MODELS__INCREMENTAL_FAIL, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + _MODELS__A, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, + _TESTS__SELECT_FROM_A, + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, +) + + +class TestIncrementalSchemaChange: + @pytest.fixture(scope="class") + def properties(self): + return { + "schema.yml": _PROPERTIES__SCHEMA, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE, + "incremental_sync_remove_only_target.sql": + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET, + "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL, + "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + "incremental_append_new_columns_remove_one.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + "model_a.sql": _MODELS__A, + "incremental_append_new_columns_target.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + "incremental_sync_all_columns_target.sql": + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "incremental_append_new_columns_remove_one_target.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + } + + @pytest.fixture(scope="class") + def tests(self): + return { + "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, + "select_from_a.sql": 
_TESTS__SELECT_FROM_A, + "select_from_incremental_append_new_columns_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "select_from_incremental_sync_all_columns.sql": + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + "select_from_incremental_sync_all_columns_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "select_from_incremental_ignore_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + "select_from_incremental_append_new_columns.sql": + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, + } + + def run_twice_and_assert( + self, include, compare_source, compare_target, project + ): + + # dbt run (twice) + run_args = ['run'] + if include: + run_args.extend(('--select', include)) + results_one = run_dbt(run_args) + assert len(results_one) == 3 + + results_two = run_dbt(run_args) + assert len(results_two) == 3 + + check_relations_equal(project.adapter, [compare_source, compare_target]) + + def run_incremental_append_new_columns(self, project): + select = 'model_a incremental_append_new_columns incremental_append_new_columns_target' + compare_source = 'incremental_append_new_columns' + compare_target = 'incremental_append_new_columns_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_append_new_columns_remove_one(self, project): + select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' + compare_source = 'incremental_append_new_columns_remove_one' + compare_target = 'incremental_append_new_columns_remove_one_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_all_columns(self, project): + select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' + compare_source = 'incremental_sync_all_columns' + compare_target = 'incremental_sync_all_columns_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_remove_only(self, project): + select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' + compare_source = 'incremental_sync_remove_only' + compare_target = 'incremental_sync_remove_only_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_ignore(self, project): + select = 'model_a incremental_ignore incremental_ignore_target' + compare_source = 'incremental_ignore' + compare_target = 'incremental_ignore_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_append_new_columns(self, project): + self.run_incremental_append_new_columns(project) + self.run_incremental_append_new_columns_remove_one(project) + + def test_run_incremental_sync_all_columns(self, project): + self.run_incremental_sync_all_columns(project) + self.run_incremental_sync_remove_only(project) + + def test_run_incremental_fail_on_schema_change(self, project): + select = 'model_a incremental_fail' + run_dbt(['run', '--models', select, '--full-refresh']) + results_two = run_dbt(['run', '--models', select], expect_pass=False) + assert 'Compilation Error' in results_two[1].message diff --git a/tests/functional/list/test_list.py b/tests/functional/list/test_list.py index 78fca376d7d..cf0d3d89add 100644 --- a/tests/functional/list/test_list.py +++ b/tests/functional/list/test_list.py @@ -357,7 +357,6 @@ def expect_seed_output(self): "json": { "name": "seed", "package_name": 
"test", - "depends_on": {"nodes": [], "macros": []}, "tags": [], "config": { "enabled": True, diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py new file mode 100644 index 00000000000..b0feea50809 --- /dev/null +++ b/tests/functional/logging/test_logging.py @@ -0,0 +1,51 @@ +import pytest +from dbt.tests.util import run_dbt, get_manifest, read_file +import json + + +my_model_sql = """ + select 1 as fun +""" + + +@pytest.fixture(scope="class") +def models(): + return {"my_model.sql": my_model_sql} + + +# This test checks that various events contain node_info, +# which is supplied by the log_contextvars context manager +def test_basic(project, logs_dir): + results = run_dbt(["--log-format=json", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert "model.test.my_model" in manifest.nodes + + # get log file + log_file = read_file(logs_dir, "dbt.log") + assert log_file + node_start = False + node_finished = False + for log_line in log_file.split('\n'): + # skip empty lines + if len(log_line) == 0: + continue + # The adapter logging also shows up, so skip non-json lines + if "[debug]" in log_line: + continue + log_dct = json.loads(log_line) + log_event = log_dct['info']['name'] + if log_event == "NodeStart": + node_start = True + if log_event == "NodeFinished": + node_finished = True + if node_start and not node_finished: + if log_event == 'NodeExecuting': + assert "node_info" in log_dct + if log_event == "JinjaLogDebug": + assert "node_info" in log_dct + if log_event == "SQLQuery": + assert "node_info" in log_dct + if log_event == "TimingInfoCollected": + assert "node_info" in log_dct + assert "timing_info" in log_dct diff --git a/tests/functional/partial_parsing/test_pp_docs.py b/tests/functional/partial_parsing/test_pp_docs.py index f9ab5e3a2d7..b3c7d52212d 100644 --- a/tests/functional/partial_parsing/test_pp_docs.py +++ b/tests/functional/partial_parsing/test_pp_docs.py @@ -129,7 +129,7 @@ def test_pp_docs(self, project): results = run_dbt(["--partial-parse", "run"]) manifest = get_manifest(project.project_root) assert len(manifest.docs) == 2 - doc_id = "test.customer_table" + doc_id = "doc.test.customer_table" assert doc_id in manifest.docs doc = manifest.docs[doc_id] doc_file_id = doc.file_id @@ -225,7 +225,7 @@ def models(self): def test_remove_replace(self, project): run_dbt(["parse", "--write-manifest"]) manifest = get_manifest(project.project_root) - doc_id = "test.whatever" + doc_id = "doc.test.whatever" assert doc_id in manifest.docs doc = manifest.docs[doc_id] doc_file = manifest.files[doc.file_id] diff --git a/tests/functional/postgres/fixtures.py b/tests/functional/postgres/fixtures.py new file mode 100644 index 00000000000..93b26b4f31b --- /dev/null +++ b/tests/functional/postgres/fixtures.py @@ -0,0 +1,134 @@ +models__incremental_sql = """ +{{ + config( + materialized = "incremental", + indexes=[ + {'columns': ['column_a'], 'type': 'hash'}, + {'columns': ['column_a', 'column_b'], 'unique': True}, + ] + ) +}} + +select * +from ( + select 1 as column_a, 2 as column_b +) t + +{% if is_incremental() %} + where column_a > (select max(column_a) from {{this}}) +{% endif %} + +""" + +models__table_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a']}, + {'columns': ['column_b']}, + {'columns': ['column_a', 'column_b']}, + {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True}, + {'columns': ['column_a'], 'type': 'hash'} + ] + ) +}} + +select 1 as 
column_a, 2 as column_b + +""" + +models_invalid__invalid_columns_type_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': 'column_a, column_b'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_type_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a'], 'type': 'non_existent_type'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_unique_config_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a'], 'unique': 'yes'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__missing_columns_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'unique': True}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +snapshots__colors_sql = """ +{% snapshot colors %} + + {{ + config( + target_database=database, + target_schema=schema, + unique_key='id', + strategy='check', + check_cols=['color'], + indexes=[ + {'columns': ['id'], 'type': 'hash'}, + {'columns': ['id', 'color'], 'unique': True}, + ] + ) + }} + + {% if var('version') == 1 %} + + select 1 as id, 'red' as color union all + select 2 as id, 'green' as color + + {% else %} + + select 1 as id, 'blue' as color union all + select 2 as id, 'green' as color + + {% endif %} + +{% endsnapshot %} + +""" + +seeds__seed_csv = """country_code,country_name +US,United States +CA,Canada +GB,United Kingdom +""" diff --git a/tests/functional/postgres/test_postgres_indexes.py b/tests/functional/postgres/test_postgres_indexes.py new file mode 100644 index 00000000000..64d61d2df87 --- /dev/null +++ b/tests/functional/postgres/test_postgres_indexes.py @@ -0,0 +1,149 @@ +import pytest +import re +from dbt.tests.util import ( + run_dbt, + run_dbt_and_capture, +) +from tests.functional.postgres.fixtures import ( + models__incremental_sql, + models__table_sql, + models_invalid__missing_columns_sql, + models_invalid__invalid_columns_type_sql, + models_invalid__invalid_type_sql, + models_invalid__invalid_unique_config_sql, + seeds__seed_csv, + snapshots__colors_sql, +) + + +INDEX_DEFINITION_PATTERN = re.compile(r"using\s+(\w+)\s+\((.+)\)\Z") + + +class TestPostgresIndex: + @pytest.fixture(scope="class") + def models(self): + return { + "table.sql": models__table_sql, + "incremental.sql": models__incremental_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed_csv} + + @pytest.fixture(scope="class") + def snapshots(self): + return {"colors.sql": snapshots__colors_sql} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "seeds": { + "quote_columns": False, + "indexes": [ + {"columns": ["country_code"], "unique": False, "type": "hash"}, + {"columns": ["country_code", "country_name"], "unique": True}, + ], + }, + "vars": { + "version": 1, + }, + } + + def test_table(self, project, unique_schema): + results = run_dbt(["run", "--models", "table"]) + assert len(results) == 1 + + indexes = self.get_indexes("table", project, unique_schema) + expected = [ + {"columns": "column_a", "unique": False, "type": "btree"}, + {"columns": "column_b", "unique": False, "type": "btree"}, + {"columns": "column_a, column_b", "unique": False, "type": "btree"}, + {"columns": "column_b, column_a", "unique": True, "type": "btree"}, + {"columns": "column_a", "unique": False, "type": "hash"}, + ] + assert len(indexes) == len(expected) + + def test_incremental(self, project, unique_schema): + for 
additional_argument in [[], [], ["--full-refresh"]]: + results = run_dbt(["run", "--models", "incremental"] + additional_argument) + assert len(results) == 1 + + indexes = self.get_indexes('incremental', project, unique_schema) + expected = [ + {"columns": "column_a", "unique": False, "type": "hash"}, + {"columns": "column_a, column_b", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def test_seed(self, project, unique_schema): + for additional_argument in [[], [], ['--full-refresh']]: + results = run_dbt(["seed"] + additional_argument) + assert len(results) == 1 + + indexes = self.get_indexes('seed', project, unique_schema) + expected = [ + {"columns": "country_code", "unique": False, "type": "hash"}, + {"columns": "country_code, country_name", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def test_snapshot(self, project, unique_schema): + for version in [1, 2]: + results = run_dbt(["snapshot", "--vars", f"version: {version}"]) + assert len(results) == 1 + + indexes = self.get_indexes('colors', project, unique_schema) + expected = [ + {"columns": "id", "unique": False, "type": "hash"}, + {"columns": "id, color", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def get_indexes(self, table_name, project, unique_schema): + sql = f""" + SELECT + pg_get_indexdef(idx.indexrelid) as index_definition + FROM pg_index idx + JOIN pg_class tab ON tab.oid = idx.indrelid + WHERE + tab.relname = '{table_name}' + AND tab.relnamespace = ( + SELECT oid FROM pg_namespace WHERE nspname = '{unique_schema}' + ); + """ + results = project.run_sql(sql, fetch="all") + return [self.parse_index_definition(row[0]) for row in results] + + def parse_index_definition(self, index_definition): + index_definition = index_definition.lower() + is_unique = "unique" in index_definition + m = INDEX_DEFINITION_PATTERN.search(index_definition) + return { + "columns": m.group(2), + "unique": is_unique, + "type": m.group(1), + } + + def assertCountEqual(self, a, b): + assert len(a) == len(b) + + +class TestPostgresInvalidIndex(): + @pytest.fixture(scope="class") + def models(self): + return { + "invalid_unique_config.sql": models_invalid__invalid_unique_config_sql, + "invalid_type.sql": models_invalid__invalid_type_sql, + "invalid_columns_type.sql": models_invalid__invalid_columns_type_sql, + "missing_columns.sql": models_invalid__missing_columns_sql, + } + + def test_invalid_index_configs(self, project): + results, output = run_dbt_and_capture(expect_pass=False) + assert len(results) == 4 + assert re.search(r"columns.*is not of type 'array'", output) + assert re.search(r"unique.*is not of type 'boolean'", output) + assert re.search(r"'columns' is a required property", output) + assert re.search(r"Database Error in model invalid_type", output) diff --git a/tests/functional/relation_names/test_relation_name.py b/tests/functional/relation_names/test_relation_name.py new file mode 100644 index 00000000000..5d941d96da5 --- /dev/null +++ b/tests/functional/relation_names/test_relation_name.py @@ -0,0 +1,124 @@ +import pytest + +from dbt.contracts.results import RunStatus +from dbt.tests.util import run_dbt + +# Test coverage: A relation is a name for a database entity, i.e. a table or view. Every relation has +# a name. These tests verify the default Postgres rules for relation names are followed. Adapters +# may override connection rules and thus may have their own tests. 
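+# A minimal sketch of the server behavior these tests lean on (the helper
+# below is illustrative only, not part of the suite): Postgres keeps just the
+# first NAMEDATALEN - 1 bytes of an over-long identifier (63 with the default
+# build), while dbt raises its own error for a 64-character relation name, as
+# the final test in this module asserts.
+def _truncated_identifier(name: str, namedatalen: int = 64) -> str:
+    # mirrors server-side truncation; bytes == characters for ASCII names
+    return name[: namedatalen - 1]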
+ +seeds__seed = """col_A,col_B +1,2 +3,4 +5,6 +""" + +models__basic_incremental = """ +select * from {{ this.schema }}.seed + +{{ + config({ + "unique_key": "col_A", + "materialized": "incremental" + }) +}} +""" + +models__basic_table = """ +select * from {{ this.schema }}.seed + +{{ + config({ + "materialized": "table" + }) +}} +""" + + +class TestGeneratedDDLNameRules: + @classmethod + def setup_class(self): + self.incremental_filename = "my_name_is_51_characters_incremental_abcdefghijklmn" + # length is 63 + self.max_length_filename = "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + # length is 64 + self.over_max_length_filename = "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + + self.filename_for_backup_file = "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" + + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + run_dbt(["seed"]) + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed} + + @pytest.fixture(scope="class") + def models(self): + return { + f"{self.incremental_filename}.sql": + models__basic_incremental, + f"{self.filename_for_backup_file}.sql": + models__basic_table, + f"{self.max_length_filename}.sql": + models__basic_table, + f"{self.over_max_length_filename}.sql": + models__basic_table, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + # Backup table name generation: + # 1. for len(relation name) <= 51, backfills + # 2. for len(relation name) > 51 characters, overwrites + # the last 12 characters with __dbt_backup + def test_name_shorter_or_equal_to_63_passes(self, project): + run_dbt( + [ + "run", + "-s", + f"{self.max_length_filename}", + f"{self.filename_for_backup_file}", + ], + ) + + def test_long_name_passes_when_temp_tables_are_generated(self): + run_dbt( + [ + "run", + "-s", + f"{self.incremental_filename}", + ], + ) + + # Run again to trigger incremental materialization + run_dbt( + [ + "run", + "-s", + f"{self.incremental_filename}", + ], + ) + + # 63 characters is the character limit for a table name in a postgres database + # (assuming compiled without changes from source) + def test_name_longer_than_63_does_not_build(self): + err_msg = "Relation name 'my_name_is_one_over_max"\ + "_length_chats_abcdefghijklmnopqrstuvwxyz1' is longer than 63 characters" + res = run_dbt( + [ + "run", + "-s", + self.over_max_length_filename, + ], + expect_pass=False + ) + assert res[0].status == RunStatus.Error + assert err_msg in res[0].message diff --git a/test/integration/044_run_operations_tests/macros/happy_macros.sql b/tests/functional/run_operations/fixtures.py similarity index 82% rename from test/integration/044_run_operations_tests/macros/happy_macros.sql rename to tests/functional/run_operations/fixtures.py index c5c6df4dc8a..f6ed82e20ec 100644 --- a/test/integration/044_run_operations_tests/macros/happy_macros.sql +++ b/tests/functional/run_operations/fixtures.py @@ -1,3 +1,4 @@ +happy_macros_sql = """ {% macro no_args() %} {% if execute %} {% call statement(auto_begin=True) %} @@ -53,4 +54,19 @@ {% macro print_something() %} {{ print("You're doing awesome!") }} -{% endmacro %} \ No newline at end of file +{% endmacro %} +""" + +sad_macros_sql = """ +{% macro syntax_error() %} + {% if execute %} + {% call statement() %} + select NOPE NOT A VALID QUERY + {% endcall %} + {% endif %} +{% endmacro %} +""" + +model_sql = """ +select 1 as id +""" diff --git 
a/tests/functional/run_operations/test_run_operations.py b/tests/functional/run_operations/test_run_operations.py new file mode 100644 index 00000000000..f91ef2d8359 --- /dev/null +++ b/tests/functional/run_operations/test_run_operations.py @@ -0,0 +1,104 @@ +import os +import pytest +import yaml + +from dbt.tests.util import ( + check_table_does_exist, + run_dbt +) +from tests.functional.run_operations.fixtures import ( + happy_macros_sql, + sad_macros_sql, + model_sql +) + + +class TestOperations: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": model_sql} + + @pytest.fixture(scope="class") + def macros(self): + return { + "happy_macros.sql": happy_macros_sql, + "sad_macros.sql": sad_macros_sql + } + + @pytest.fixture(scope="class") + def dbt_profile_data(self, unique_schema): + return { + "config": {"send_anonymous_usage_stats": False}, + "test": { + "outputs": { + "default": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": os.getenv("POSTGRES_TEST_USER", "root"), + "pass": os.getenv("POSTGRES_TEST_PASS", "password"), + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + "schema": unique_schema, + }, + "noaccess": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": 'noaccess', + "pass": 'password', + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + 'schema': unique_schema + } + }, + "target": "default", + }, + } + + def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs): + args = ['run-operation', macro] + if kwargs: + args.extend(('--args', yaml.safe_dump(kwargs))) + if extra_args: + args.extend(extra_args) + return run_dbt(args, expect_pass=expect_pass) + + def test_macro_noargs(self, project): + self.run_operation('no_args') + check_table_does_exist(project.adapter, 'no_args') + + def test_macro_args(self, project): + self.run_operation('table_name_args', table_name='my_fancy_table') + check_table_does_exist(project.adapter, 'my_fancy_table') + + def test_macro_exception(self, project): + self.run_operation('syntax_error', False) + + def test_macro_missing(self, project): + self.run_operation('this_macro_does_not_exist', False) + + def test_cannot_connect(self, project): + self.run_operation('no_args', + extra_args=['--target', 'noaccess'], + expect_pass=False) + + def test_vacuum(self, project): + run_dbt(['run']) + # this should succeed + self.run_operation('vacuum', table_name='model') + + def test_vacuum_ref(self, project): + run_dbt(['run']) + # this should succeed + self.run_operation('vacuum_ref', ref_target='model') + + def test_select(self, project): + self.run_operation('select_something', name='world') + + def test_access_graph(self, project): + self.run_operation('log_graph') + + def test_print(self, project): + # Tests that calling the `print()` macro does not cause an exception + self.run_operation('print_something') diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 00c14cd711b..44a6696931b 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -95,7 +95,7 @@ alt_local_utils__macros__type_timestamp_sql, all_quotes_schema__schema_yml, ) -from dbt.exceptions import ParsingException, CompilationException +from dbt.exceptions import ParsingException, CompilationException, DuplicateResourceName from 
dbt.contracts.results import TestStatus @@ -904,9 +904,9 @@ def test_generic_test_collision( project, ): """These tests collide, since only the configs differ""" - with pytest.raises(CompilationException) as exc: + with pytest.raises(DuplicateResourceName) as exc: run_dbt() - assert "dbt found two tests with the name" in str(exc) + assert "dbt found two tests with the name" in str(exc.value) class TestGenericTestsConfigCustomMacros: diff --git a/test/integration/030_statement_tests/seed/seed.csv b/tests/functional/statements/fixtures.py similarity index 89% rename from test/integration/030_statement_tests/seed/seed.csv rename to tests/functional/statements/fixtures.py index 640af6c4ee6..e05f697644a 100644 --- a/test/integration/030_statement_tests/seed/seed.csv +++ b/tests/functional/statements/fixtures.py @@ -1,4 +1,12 @@ -id,first_name,last_name,email,gender,ip_address +# +# Seeds +# +seeds__statement_expected = """source,value +matrix,100 +table,100 +""" + +seeds__statement_actual = """id,first_name,last_name,email,gender,ip_address 1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 @@ -99,3 +107,32 @@ 98,Angela,Brooks,abrooks2p@mtv.com,Female,10.63.249.126 99,Harold,Foster,hfoster2q@privacy.gov.au,Male,139.214.40.244 100,Carl,Meyer,cmeyer2r@disqus.com,Male,204.117.7.88 +""" + +# +# Models +# +models__statement_actual = """ +-- {{ ref('seed') }} + +{%- call statement('test_statement', fetch_result=True) -%} + + select + count(*) as "num_records" + + from {{ ref('seed') }} + +{%- endcall -%} + +{% set result = load_result('test_statement') %} + +{% set res_table = result['table'] %} +{% set res_matrix = result['data'] %} + +{% set matrix_value = res_matrix[0][0] %} +{% set table_value = res_table[0]['num_records'] %} + +select 'matrix' as source, {{ matrix_value }} as value +union all +select 'table' as source, {{ table_value }} as value +""" diff --git a/tests/functional/statements/test_statements.py b/tests/functional/statements/test_statements.py new file mode 100644 index 00000000000..4b8640b8066 --- /dev/null +++ b/tests/functional/statements/test_statements.py @@ -0,0 +1,43 @@ +import pathlib +import pytest + +from dbt.tests.util import ( + run_dbt, + check_relations_equal, + write_file +) +from tests.functional.statements.fixtures import ( + models__statement_actual, + seeds__statement_actual, + seeds__statement_expected, +) + + +class TestStatements: + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + # put seeds in 'seed' not 'seeds' directory + (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True) + write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv") + write_file(seeds__statement_expected, project.project_root, "seed", "statement_expected.csv") + + @pytest.fixture(scope="class") + def models(self): + return {"statement_actual.sql": models__statement_actual} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + "seed-paths": ["seed"], + } + + def test_postgres_statements(self, project): + results = run_dbt(["seed"]) + assert len(results) == 2 + results = run_dbt() + assert len(results) == 1 + + check_relations_equal(project.adapter, ["statement_actual", "statement_expected"]) diff --git a/tests/functional/store_test_failures_tests/fixtures.py b/tests/functional/store_test_failures_tests/fixtures.py new file mode 100644 
index 00000000000..dae8530135e --- /dev/null +++ b/tests/functional/store_test_failures_tests/fixtures.py @@ -0,0 +1,126 @@ +# +# Seeds +# +seeds__people = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +""" + +seeds__expected_accepted_values = """value_field,n_records +Gary,1 +Rose,1 +""" + +seeds__expected_failing_test = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +""" + +seeds__expected_not_null_problematic_model_id = """id,first_name,last_name,email,gender,ip_address +,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +""" + +seeds__expected_unique_problematic_model_id = """unique_field,n_records +2,2 +1,2 +""" + +# +# Schema +# +properties__schema_yml = """ +version: 2 + +models: + + - name: fine_model + columns: + - name: id + tests: + - unique + - not_null + + - name: problematic_model + columns: + - name: id + tests: + - unique: + store_failures: true + - not_null + - name: first_name + tests: + # test truncation of really long test name + - accepted_values: + values: + - Jack + - Kathryn + - Gerald + - Bonnie + - Harold + - Jacqueline + - Wanda + - Craig + # - Gary + # - Rose + + - name: fine_model_but_with_a_no_good_very_long_name + columns: + - name: quite_long_column_name + tests: + # test truncation of really long test name with builtin + - unique +""" + +# +# Models +# +models__fine_model = """ +select * from {{ ref('people') }} +""" + +models__file_model_but_with_a_no_good_very_long_name = """ +select 1 as quite_long_column_name +""" + +models__problematic_model = """ +select * from {{ ref('people') }} + +union all + +select * from {{ ref('people') }} +where id in (1,2) + +union all + +select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }} +where id in (3,4) +""" + +# +# Tests +# +tests__failing_test = """ +select * from {{ ref('fine_model') }} +""" + +tests__passing_test = """ +select * from {{ ref('fine_model') }} +where false +""" diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/functional/store_test_failures_tests/test_store_test_failures.py new file mode 100644 index 00000000000..ff26d7d97d3 --- /dev/null +++ b/tests/functional/store_test_failures_tests/test_store_test_failures.py @@ -0,0 +1,152 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from 
tests.functional.store_test_failures_tests.fixtures import ( + seeds__people, + seeds__expected_accepted_values, + seeds__expected_failing_test, + seeds__expected_not_null_problematic_model_id, + seeds__expected_unique_problematic_model_id, + properties__schema_yml, + models__problematic_model, + models__fine_model, + models__file_model_but_with_a_no_good_very_long_name, + tests__failing_test, + tests__passing_test, +) + +# used to rename test audit schema to help test schema meet max char limit +# the default is _dbt_test__audit but this runs over the postgres 63 schema name char limit +# without which idempotency conditions will not hold (i.e. dbt can't drop the schema properly) +TEST_AUDIT_SCHEMA_SUFFIX = "dbt_test__aud" + + +class StoreTestFailuresBase: + @pytest.fixture(scope="function", autouse=True) + def setUp(self, project): + self.test_audit_schema = f"{project.test_schema}_{TEST_AUDIT_SCHEMA_SUFFIX}" + run_dbt(["seed"]) + run_dbt(["run"]) + + @pytest.fixture(scope="class") + def seeds(self): + return { + "people.csv": seeds__people, + "expected_accepted_values.csv": seeds__expected_accepted_values, + "expected_failing_test.csv": seeds__expected_failing_test, + "expected_not_null_problematic_model_id.csv": + seeds__expected_not_null_problematic_model_id, + "expected_unique_problematic_model_id.csv": + seeds__expected_unique_problematic_model_id, + } + + @pytest.fixture(scope="class") + def tests(self): + return { + "failing_test.sql": tests__failing_test, + "passing_test.sql": tests__passing_test, + } + + @pytest.fixture(scope="class") + def properties(self): + return {"schema.yml": properties__schema_yml} + + @pytest.fixture(scope="class") + def models(self): + return { + "fine_model.sql": models__fine_model, + "fine_model_but_with_a_no_good_very_long_name.sql": + models__file_model_but_with_a_no_good_very_long_name, + "problematic_model.sql": models__problematic_model, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + "test": self.column_type_overrides(), + }, + "tests": { + "+schema": TEST_AUDIT_SCHEMA_SUFFIX + } + } + + def column_type_overrides(self): + return {} + + def run_tests_store_one_failure(self, project): + run_dbt(["test"], expect_pass=False) + + # one test is configured with store_failures: true, make sure it worked + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id" + ] + ) + + def run_tests_store_failures_and_assert(self, project): + # make sure this works idempotently for all tests + run_dbt(["test", "--store-failures"], expect_pass=False) + results = run_dbt(["test", "--store-failures"], expect_pass=False) + + # compare test results + actual = [(r.status, r.failures) for r in results] + expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), + ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] + assert sorted(actual) == sorted(expected) + + # compare test results stored in database + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.failing_test", + "expected_failing_test" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.not_null_problematic_model_id", + "expected_not_null_problematic_model_id" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id" + ]) + check_relations_equal(project.adapter, [ + 
f"{self.test_audit_schema}.accepted_values_problemat" + "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", + "expected_accepted_values" + ]) + + +class TestStoreTestFailures(StoreTestFailuresBase): + @pytest.fixture(scope="function") + def clean_up(self, project): + yield + with project.adapter.connection_named('__test'): + relation = project.adapter.Relation.create(database=project.database, schema=self.test_audit_schema) + project.adapter.drop_schema(relation) + + relation = project.adapter.Relation.create(database=project.database, schema=project.test_schema) + project.adapter.drop_schema(relation) + + def column_type_overrides(self): + return { + "expected_unique_problematic_model_id": { + "+column_types": { + "n_records": "bigint", + }, + }, + "expected_accepted_values": { + "+column_types": { + "n_records": "bigint", + }, + }, + } + + def test__store_and_assert(self, project, clean_up): + self.run_tests_store_one_failure(project) + self.run_tests_store_failures_and_assert(project) diff --git a/tests/functional/threading/test_thread_count.py b/tests/functional/threading/test_thread_count.py new file mode 100644 index 00000000000..c31f5ed6312 --- /dev/null +++ b/tests/functional/threading/test_thread_count.py @@ -0,0 +1,46 @@ +import pytest +from dbt.tests.util import run_dbt + + +models__do_nothing__sql = """ +with x as (select pg_sleep(1)) select 1 +""" + + +class TestThreadCount: + @pytest.fixture(scope="class") + def models(self): + return { + "do_nothing_1.sql": models__do_nothing__sql, + "do_nothing_2.sql": models__do_nothing__sql, + "do_nothing_3.sql": models__do_nothing__sql, + "do_nothing_4.sql": models__do_nothing__sql, + "do_nothing_5.sql": models__do_nothing__sql, + "do_nothing_6.sql": models__do_nothing__sql, + "do_nothing_7.sql": models__do_nothing__sql, + "do_nothing_8.sql": models__do_nothing__sql, + "do_nothing_9.sql": models__do_nothing__sql, + "do_nothing_10.sql": models__do_nothing__sql, + "do_nothing_11.sql": models__do_nothing__sql, + "do_nothing_12.sql": models__do_nothing__sql, + "do_nothing_13.sql": models__do_nothing__sql, + "do_nothing_14.sql": models__do_nothing__sql, + "do_nothing_15.sql": models__do_nothing__sql, + "do_nothing_16.sql": models__do_nothing__sql, + "do_nothing_17.sql": models__do_nothing__sql, + "do_nothing_18.sql": models__do_nothing__sql, + "do_nothing_19.sql": models__do_nothing__sql, + "do_nothing_20.sql": models__do_nothing__sql, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return {"config-version": 2} + + @pytest.fixture(scope="class") + def profiles_config_update(self): + return {"threads": 2} + + def test_threading_8x(self, project): + results = run_dbt(args=["run", "--threads", "16"]) + assert len(results), 20 diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index c2064b84c1a..3dbff04c303 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -1,7 +1,7 @@ # flake8: noqa from dbt.events.test_types import UnitTestInfo from dbt.events import AdapterLogger -from dbt.events.functions import event_to_json, LOG_VERSION, reset_event_history +from dbt.events.functions import event_to_json, LOG_VERSION, event_to_dict from dbt.events.types import * from dbt.events.test_types import * @@ -13,13 +13,13 @@ ErrorLevel, TestLevel, ) -from dbt.events.proto_types import NodeInfo, RunResultMsg, ReferenceKeyMsg +from dbt.events.proto_types import ListOfStrings, NodeInfo, RunResultMsg, ReferenceKeyMsg from importlib import reload import dbt.events.functions as event_funcs import dbt.flags as 
flags import inspect import json -from dbt.contracts.graph.parsed import ParsedModelNode, NodeConfig, DependsOn +from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn from dbt.contracts.files import FileHash from mashumaro.types import SerializableType from typing import Generic, TypeVar, Dict @@ -29,10 +29,8 @@ def get_all_subclasses(cls): all_subclasses = [] for subclass in cls.__subclasses__(): - # If the test breaks because of abcs this list might have to be updated. - if subclass in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel]: - continue - all_subclasses.append(subclass) + if subclass not in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel, DynamicLevel]: + all_subclasses.append(subclass) all_subclasses.extend(get_all_subclasses(subclass)) return set(all_subclasses) @@ -81,7 +79,7 @@ def test_formatting(self): event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,)) assert isinstance(event.base_msg, str) - event = MacroEventDebug(msg=[1,2,3]) + event = JinjaLogDebug(msg=[1,2,3]) assert isinstance(event.msg, str) @@ -93,41 +91,19 @@ def test_event_codes(self): all_concrete = get_all_subclasses(BaseEvent) all_codes = set() - for event in all_concrete: - if not inspect.isabstract(event): - # must be in the form 1 capital letter, 3 digits - assert re.match("^[A-Z][0-9]{3}", event.code) - # cannot have been used already - assert ( - event.info.code not in all_codes - ), f"{event.code} is assigned more than once. Check types.py for duplicates." - all_codes.add(event.info.code) - - -class TestEventBuffer: - def setUp(self) -> None: - flags.EVENT_BUFFER_SIZE = 10 - reload(event_funcs) - - # ensure events are populated to the buffer exactly once - def test_buffer_populates(self): - self.setUp() - event_funcs.fire_event(UnitTestInfo(msg="Test Event 1")) - event_funcs.fire_event(UnitTestInfo(msg="Test Event 2")) - event1 = event_funcs.EVENT_HISTORY[-2] - assert event_funcs.EVENT_HISTORY.count(event1) == 1 - - # ensure events drop from the front of the buffer when buffer maxsize is reached - def test_buffer_FIFOs(self): - reset_event_history() - event_funcs.EVENT_HISTORY.clear() - for n in range(1, (flags.EVENT_BUFFER_SIZE + 2)): - event_funcs.fire_event(UnitTestInfo(msg=f"Test Event {n}")) - assert event_funcs.EVENT_HISTORY.count(UnitTestInfo(msg="Test Event 1")) == 0 + for event_cls in all_concrete: + code = event_cls.code(event_cls) + # must be in the form 1 capital letter, 3 digits + assert re.match("^[A-Z][0-9]{3}", code) + # cannot have been used already + assert ( + code not in all_codes + ), f"{code} is assigned more than once. Check types.py for duplicates." 
+ all_codes.add(code) def MockNode(): - return ParsedModelNode( + return ModelNode( alias="model_one", name="model_one", database="dbt", @@ -164,56 +140,62 @@ def MockNode(): sample_values = [ - MainReportVersion(version="", log_version=LOG_VERSION), - MainKeyboardInterrupt(), - MainEncounteredError(exc=""), - MainStackTrace(stack_trace=""), + # A - pre-project loading + MainReportVersion(version=""), + MainReportArgs(args={}), MainTrackingUserState(user_state=""), - ParseCmdStart(), - ParseCmdCompiling(), - ParseCmdWritingManifest(), - ParseCmdDone(), - ManifestDependenciesLoaded(), - ManifestLoaderCreated(), - ManifestLoaded(), - ManifestChecked(), - ManifestFlatGraphBuilt(), - ParseCmdPerfInfoPath(path=""), - GitSparseCheckoutSubdirectory(subdir=""), - GitProgressCheckoutRevision(revision=""), - GitProgressUpdatingExistingDependency(dir=""), - GitProgressPullingNewDependency(dir=""), - GitNothingToDo(sha=""), - GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""), - GitProgressCheckedOutAt(end_sha=""), - SystemErrorRetrievingModTime(path=""), - SystemCouldNotWrite(path="", reason="", exc=""), - SystemExecutingCmd(cmd=[""]), - SystemStdOutMsg(bmsg=b""), - SystemStdErrMsg(bmsg=b""), - SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), - MacroEventInfo(msg=""), - MacroEventDebug(msg=""), + MergedFromState(num_merged=0, sample=[]), + MissingProfileTarget(profile_name="", target_name=""), + InvalidVarsYAML(), + DbtProjectError(), + DbtProjectErrorException(exc=""), + DbtProfileError(), + DbtProfileErrorException(exc=""), + ProfileListTitle(), + ListSingleProfile(profile=""), + NoDefinedProfiles(), + ProfileHelpMessage(), + StarterProjectPath(dir=""), + ConfigFolderDirectory(dir=""), + NoSampleProfileFound(adapter=""), + ProfileWrittenWithSample(name="", path=""), + ProfileWrittenWithTargetTemplateYAML(name="", path=""), + ProfileWrittenWithProjectTemplateYAML(name="", path=""), + SettingUpProfile(), + InvalidProfileTemplateYAML(), + ProjectNameAlreadyExists(name=""), + ProjectCreated(project_name=""), + + # D - Deprecations ====================== + PackageRedirectDeprecation(old_name="", new_name=""), + PackageInstallPathDeprecation(), + ConfigSourcePathDeprecation(deprecated_path="", exp_path=""), + ConfigDataPathDeprecation(deprecated_path="", exp_path=""), + AdapterDeprecationWarning(old_name="", new_name=""), + MetricAttributesRenamed(metric_name=""), + ExposureNameDeprecation(exposure=""), + + # E - DB Adapter ====================== + AdapterEventDebug(), + AdapterEventInfo(), + AdapterEventWarning(), + AdapterEventError(), NewConnection(conn_type="", conn_name=""), ConnectionReused(conn_name=""), - ConnectionLeftOpen(conn_name=""), - ConnectionClosed(conn_name=""), + ConnectionLeftOpenInCleanup(conn_name=""), + ConnectionClosedInCleanup(conn_name=""), RollbackFailed(conn_name=""), - ConnectionClosed2(conn_name=""), - ConnectionLeftOpen2(conn_name=""), + ConnectionClosed(conn_name=""), + ConnectionLeftOpen(conn_name=""), Rollback(conn_name=""), CacheMiss(conn_name="", database="", schema=""), - ListRelations(database="", schema="", relations=[]), + ListRelations(database="", schema=""), ConnectionUsed(conn_type="", conn_name=""), SQLQuery(conn_name="", sql=""), SQLQueryStatus(status="", elapsed=0.1), - CodeExecution(conn_name="", code_content=""), - CodeExecutionStatus(status="", elapsed=0.1), SQLCommit(conn_name=""), ColTypeChange( - orig_type="", - new_type="", - table=ReferenceKeyMsg(database="", schema="", identifier=""), + orig_type="", 
new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="") ), SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")), @@ -231,6 +213,7 @@ def MockNode(): dropped=ReferenceKeyMsg(database="", schema="", identifier=""), consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], ), + DropRelation(dropped=ReferenceKeyMsg()), UpdateReference( old_key=ReferenceKeyMsg(database="", schema="", identifier=""), new_key=ReferenceKeyMsg(database="", schema="", identifier=""), @@ -246,29 +229,49 @@ def MockNode(): DumpBeforeRenameSchema(dump=dict()), DumpAfterRenameSchema(dump=dict()), AdapterImportError(exc=""), - PluginLoadError(), - SystemReportReturnCode(returncode=0), + PluginLoadError(exc_info=""), NewConnectionOpening(connection_state=""), - TimingInfoCollected(), - MergedFromState(num_merged=0, sample=[]), - MissingProfileTarget(profile_name="", target_name=""), - InvalidVarsYAML(), + CodeExecution(conn_name="", code_content=""), + CodeExecutionStatus(status="", elapsed=0.1), + CatalogGenerationError(exc=""), + WriteCatalogFailure(num_exceptions=0), + CatalogWritten(path=""), + CannotGenerateDocs(), + BuildingCatalog(), + DatabaseErrorRunningHook(hook_type=""), + HooksRunning(num_hooks=0, hook_type=""), + HookFinished(stat_line="", execution="", execution_time=0), + + # I - Project parsing ====================== + ParseCmdStart(), + ParseCmdCompiling(), + ParseCmdWritingManifest(), + ParseCmdDone(), + ManifestDependenciesLoaded(), + ManifestLoaderCreated(), + ManifestLoaded(), + ManifestChecked(), + ManifestFlatGraphBuilt(), + ParseCmdPerfInfoPath(path=""), GenericTestFileParse(path=""), MacroFileParse(path=""), PartialParsingFullReparseBecauseOfError(), - PartialParsingFile(file_id=""), PartialParsingExceptionFile(file=""), + PartialParsingFile(file_id=""), PartialParsingException(exc_info={}), PartialParsingSkipParsing(), PartialParsingMacroChangeStartFullParse(), + PartialParsingProjectEnvVarsChanged(), + PartialParsingProfileEnvVarsChanged(), + PartialParsingDeletedMetric(unique_id=""), ManifestWrongMetadataVersion(version=""), PartialParsingVersionMismatch(saved_version="", current_version=""), PartialParsingFailedBecauseConfigChange(), PartialParsingFailedBecauseProfileChange(), PartialParsingFailedBecauseNewProjectDependency(), PartialParsingFailedBecauseHashChanged(), - PartialParsingDeletedMetric(unique_id=""), - ParsedFileLoadFailed(path="", exc=""), + PartialParsingNotEnabled(), + ParsedFileLoadFailed(path="", exc="", exc_info=""), PartialParseSaveFileNotFound(), StaticParserCausedJinjaRendering(path=""), UsingExperimentalParser(path=""), @@ -289,51 +292,179 @@ def MockNode(): PartialParsingUpdateSchemaFile(file_id=""), PartialParsingDeletedSource(unique_id=""), PartialParsingDeletedExposure(unique_id=""), - InvalidDisabledSourceInTestNode(msg=""), - InvalidRefInTestNode(msg=""), + InvalidDisabledTargetInTestNode( + resource_type_title="", + unique_id="", + original_file_path="", + target_kind="", + target_name="", + target_package="", + ), + UnusedResourceConfigPath(unused_config_paths=[]), + SeedIncreased(package_name="", name=""), + SeedExceedsLimitSamePath(package_name="", name=""), + SeedExceedsLimitAndPathChanged(package_name="", name=""), + SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""), + UnusedTables(unused_tables=[]), + WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""), + 
NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""), + MacroPatchNotFound(patch_name=""), + NodeNotFoundOrDisabled( + original_file_path="", + unique_id="", + resource_type_title="", + target_name="", + target_kind="", + target_package="", + disabled="", + ), + JinjaLogWarning(), + + # M - Deps generation ====================== + + GitSparseCheckoutSubdirectory(subdir=""), + GitProgressCheckoutRevision(revision=""), + GitProgressUpdatingExistingDependency(dir=""), + GitProgressPullingNewDependency(dir=""), + GitNothingToDo(sha=""), + GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""), + GitProgressCheckedOutAt(end_sha=""), + RegistryProgressGETRequest(url=""), + RegistryProgressGETResponse(url="", resp_code=1234), + SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), + JinjaLogInfo(msg=""), + JinjaLogDebug(msg=""), + DepsNoPackagesFound(), + DepsStartPackageInstall(package_name=""), + DepsInstallInfo(version_name=""), + DepsUpdateAvailable(version_latest=""), + DepsUpToDate(), + DepsListSubdirectory(subdirectory=""), + DepsNotifyUpdatesAvailable(packages=ListOfStrings()), + RetryExternalCall(attempt=0, max=0), + RecordRetryException(exc=""), + RegistryIndexProgressGETRequest(url=""), + RegistryIndexProgressGETResponse(url="", resp_code=1234), + RegistryResponseUnexpectedType(response=""), + RegistryResponseMissingTopKeys(response=""), + RegistryResponseMissingNestedKeys(response=""), + RegistryResponseExtraNestedKeys(response=""), + DepsSetDownloadDirectory(path=""), + + # Q - Node execution ====================== + RunningOperationCaughtError(exc=""), + CompileComplete(), + FreshnessCheckComplete(), + SeedHeader(header=""), + SeedHeaderSeparator(len_header=0), + SQLRunnerException(exc=""), + LogTestResult( + name="", + index=0, + num_models=0, + execution_time=0, + num_failures=0, + ), + LogStartLine(description="", index=0, total=0, node_info=NodeInfo()), + LogModelResult( + description="", + status="", + index=0, + total=0, + execution_time=0, + ), + LogSnapshotResult( + status="", + description="", + cfg={}, + index=0, + total=0, + execution_time=0, + ), + LogSeedResult( + status="", + index=0, + total=0, + execution_time=0, + schema="", + relation="", + ), + LogFreshnessResult( + source_name="", + table_name="", + index=0, + total=0, + execution_time=0, + ), + LogCancelLine(conn_name=""), + DefaultSelector(name=""), + NodeStart(node_info=NodeInfo()), + NodeFinished(node_info=NodeInfo()), + QueryCancelationUnsupported(type=""), + ConcurrencyLine(num_threads=0, target_name=""), + WritingInjectedSQLForNode(node_info=NodeInfo()), + NodeCompiling(node_info=NodeInfo()), + NodeExecuting(node_info=NodeInfo()), + LogHookStartLine( + statement="", + index=0, + total=0, + ), + LogHookEndLine( + statement="", + status="", + index=0, + total=0, + execution_time=0, + ), + SkippingDetails( + resource_type="", + schema="", + node_name="", + index=0, + total=0, + ), + NothingToDo(), RunningOperationUncaughtError(exc=""), - DbtProjectError(), - DbtProjectErrorException(exc=""), - DbtProfileError(), - DbtProfileErrorException(exc=""), - ProfileListTitle(), - ListSingleProfile(profile=""), - NoDefinedProfiles(), - ProfileHelpMessage(), + EndRunResult(), + NoNodesSelected(), + DepsUnpinned(revision="", git=""), + NoNodesForSelectionCriteria(spec_raw=""), + + # W - Node testing ====================== + CatchableExceptionOnRun(exc=""), InternalExceptionOnRun(build_path="", exc=""), GenericExceptionOnRun(build_path="", unique_id="", exc=""), 
NodeConnectionReleaseError(node_name="", exc=""), + FoundStats(stat_line=""), + + # Z - misc ====================== + + MainKeyboardInterrupt(), + MainEncounteredError(exc=""), + MainStackTrace(stack_trace=""), + SystemErrorRetrievingModTime(path=""), + SystemCouldNotWrite(path="", reason="", exc=""), + SystemExecutingCmd(cmd=[""]), + SystemStdOutMsg(bmsg=b""), + SystemStdErrMsg(bmsg=b""), + SystemReportReturnCode(returncode=0), + TimingInfoCollected(), + LogDebugStackTrace(), CheckCleanPath(path=""), ConfirmCleanPath(path=""), ProtectedCleanPath(path=""), FinishedCleanPaths(), OpenCommand(open_cmd="", profiles_dir=""), - DepsNoPackagesFound(), - DepsStartPackageInstall(package_name=""), - DepsInstallInfo(version_name=""), - DepsUpdateAvailable(version_latest=""), - DepsListSubdirectory(subdirectory=""), - DepsNotifyUpdatesAvailable(packages=[]), - DatabaseErrorRunningHook(hook_type=""), EmptyLine(), - HooksRunning(num_hooks=0, hook_type=""), - HookFinished(stat_line="", execution="", execution_time=0), - WriteCatalogFailure(num_exceptions=0), - CatalogWritten(path=""), - CannotGenerateDocs(), - BuildingCatalog(), - CompileComplete(), - FreshnessCheckComplete(), ServingDocsPort(address="", port=0), ServingDocsAccessInfo(port=""), ServingDocsExitInfo(), - SeedHeader(header=""), - SeedHeaderSeparator(len_header=0), RunResultWarning(resource_type="", node_name="", path=""), RunResultFailure(resource_type="", node_name="", path=""), - StatsLine(stats={"pass": 0, "warn": 0, "error": 0, "skip": 0, "total": 0}), + StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}), RunResultError(msg=""), RunResultErrorNoMessage(status=""), SQLCompiledPath(path=""), @@ -341,131 +472,39 @@ def MockNode(): FirstRunResultError(msg=""), AfterFirstRunResultError(msg=""), EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False), - PrintStartLine(description="", index=0, total=0, node_info=NodeInfo()), - PrintHookStartLine(statement="", index=0, total=0, node_info=NodeInfo()), - PrintHookEndLine( - statement="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - SkippingDetails( - resource_type="", schema="", node_name="", index=0, total=0, node_info=NodeInfo() - ), - PrintErrorTestResult(name="", index=0, num_models=0, execution_time=0, node_info=NodeInfo()), - PrintPassTestResult(name="", index=0, num_models=0, execution_time=0, node_info=NodeInfo()), - PrintWarnTestResult( - name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() - ), - PrintFailureTestResult( - name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() - ), - PrintSkipBecauseError(schema="", relation="", index=0, total=0), - PrintModelErrorResultLine( - description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintModelResultLine( - description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSnapshotErrorResultLine( - status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSnapshotResultLine( - status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSeedErrorResultLine( - status="", index=0, total=0, execution_time=0, schema="", relation="", node_info=NodeInfo() - ), - PrintSeedResultLine( - status="", index=0, total=0, execution_time=0, schema="", relation="", node_info=NodeInfo() - ), - PrintFreshnessErrorLine( - source_name="", table_name="", index=0, total=0, 
execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessErrorStaleLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessWarnLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessPassLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintCancelLine(conn_name=""), - DefaultSelector(name=""), - NodeStart(unique_id="", node_info=NodeInfo()), - NodeCompiling(unique_id="", node_info=NodeInfo()), - NodeExecuting(unique_id="", node_info=NodeInfo()), - NodeFinished(unique_id="", node_info=NodeInfo(), run_result=RunResultMsg()), - QueryCancelationUnsupported(type=""), - ConcurrencyLine(num_threads=0, target_name=""), - StarterProjectPath(dir=""), - ConfigFolderDirectory(dir=""), - NoSampleProfileFound(adapter=""), - ProfileWrittenWithSample(name="", path=""), - ProfileWrittenWithTargetTemplateYAML(name="", path=""), - ProfileWrittenWithProjectTemplateYAML(name="", path=""), - SettingUpProfile(), - InvalidProfileTemplateYAML(), - ProjectNameAlreadyExists(name=""), - ProjectCreated(project_name="", docs_url="", slack_url=""), - DepsSetDownloadDirectory(path=""), + LogSkipBecauseError(schema="", relation="", index=0, total=0), EnsureGitInstalled(), DepsCreatingLocalSymlink(), DepsSymlinkNotAvailable(), - FoundStats(stat_line=""), - CompilingNode(unique_id=""), - WritingInjectedSQLForNode(unique_id=""), DisableTracking(), SendingEvent(kwargs=""), SendEventFailure(), FlushEvents(), FlushEventsFailure(), TrackingInitializeFailure(), - RetryExternalCall(attempt=0, max=0), - GeneralWarningMsg(msg="", log_fmt=""), - GeneralWarningException(exc="", log_fmt=""), - PartialParsingProfileEnvVarsChanged(), - AdapterEventDebug(name="", base_msg="", args=()), - AdapterEventInfo(name="", base_msg="", args=()), - AdapterEventWarning(name="", base_msg="", args=()), - AdapterEventError(name="", base_msg="", args=()), - PrintDebugStackTrace(), - MainReportArgs(args={}), - RegistryProgressGETRequest(url=""), - RegistryIndexProgressGETRequest(url=""), - RegistryIndexProgressGETResponse(url="", resp_code=1), - RegistryResponseUnexpectedType(response=""), - RegistryResponseMissingTopKeys(response=""), - RegistryResponseMissingNestedKeys(response=""), - RegistryResponseExtraNestedKeys(response=""), - DepsUpToDate(), - PartialParsingNotEnabled(), - SQLRunnerException(exc=""), - DropRelation(dropped=ReferenceKeyMsg(database="", schema="", identifier="")), - PartialParsingProjectEnvVarsChanged(), - RegistryProgressGETResponse(url="", resp_code=1), - IntegrationTestDebug(msg=""), - IntegrationTestInfo(msg=""), - IntegrationTestWarn(msg=""), - IntegrationTestError(msg=""), - IntegrationTestException(msg=""), - EventBufferFull(), - RecordRetryException(exc=""), - UnitTestInfo(msg=""), + RunResultWarningMessage(), + + # T - tests ====================== + IntegrationTestInfo(), + IntegrationTestDebug(), + IntegrationTestWarn(), + IntegrationTestError(), + IntegrationTestException(), + UnitTestInfo(), + ] + + class TestEventJSONSerialization: # attempts to test that every event is serializable to json. # event types that take `Any` are not possible to test in this way since some will serialize # just fine and others won't. 
def test_all_serializable(self): - no_test = [DummyCacheEvent] - all_non_abstract_events = set( - filter( - lambda x: not inspect.isabstract(x) and x not in no_test, - get_all_subclasses(BaseEvent), - ) + get_all_subclasses(BaseEvent), ) all_event_values_list = list(map(lambda x: x.__class__, sample_values)) diff = all_non_abstract_events.difference(set(all_event_values_list)) @@ -479,7 +518,7 @@ def test_all_serializable(self): # if we have everything we need to test, try to serialize everything for event in sample_values: - event_dict = event.to_dict() + event_dict = event_to_dict(event) try: event_json = event_to_json(event) except Exception as e: @@ -487,30 +526,3 @@ def test_all_serializable(self): T = TypeVar("T") - - -@dataclass -class Counter(Generic[T], SerializableType): - dummy_val: T - count: int = 0 - - def next(self) -> T: - self.count = self.count + 1 - return self.dummy_val - - # mashumaro serializer - def _serialize() -> Dict[str, int]: - return {"count": count} - - -@dataclass -class DummyCacheEvent(InfoLevel, Cache, SerializableType): - code = "X999" - counter: Counter - - def message(self) -> str: - return f"state: {self.counter.next()}" - - # mashumaro serializer - def _serialize() -> str: - return "DummyCacheEvent" diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index 46e9479ef39..d5b070c41e2 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -5,14 +5,15 @@ RollbackFailed, MainEncounteredError, PluginLoadError, - PrintStartLine, + LogStartLine, + LogTestResult, ) -from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars +from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars, info from dbt.events import proto_types as pl from dbt.version import installed -info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra"} +info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra", "category"} def test_events(): @@ -89,7 +90,7 @@ def test_node_info_events(): "node_started_at": "some_time", "node_finished_at": "another_time", } - event = PrintStartLine( + event = LogStartLine( description="some description", index=123, total=111, @@ -121,3 +122,16 @@ def test_extra_dict_on_event(monkeypatch): # clean up reset_metadata_vars() + + +def test_dynamic_level_events(): + event = LogTestResult( + name="model_name", + info=info(level=LogTestResult.status_to_level("pass")), + status="pass", + index=1, + num_models=3, + num_failures=0 + ) + assert event + assert event.info.level == "info" diff --git a/tests/unit/test_version.py b/tests/unit/test_version.py index 6545891fc54..217988ba5e2 100644 --- a/tests/unit/test_version.py +++ b/tests/unit/test_version.py @@ -673,10 +673,16 @@ def mock_import(*args, **kwargs): def mock_versions(mocker, installed="1.0.0", latest=None, plugins={}): mocker.patch("dbt.version.__version__", installed) - mock_plugins(mocker, plugins) mock_latest_versions(mocker, latest, plugins) + # mock_plugins must be called last to avoid erroneously raising an ImportError. + mock_plugins(mocker, plugins) +# NOTE: mock_plugins patches importlib.import_module, and should always be the last +# patch to be mocked in order to avoid erroneously raising an ImportError. +# Explanation: As of Python 3.11, mock.patch indirectly uses importlib.import_module +# and thus uses the mocked object (in this case, mock_import) instead of the real +# implementation in subsequent mock.patch calls.
Issue: https://github.com/python/cpython/issues/98771 def mock_plugins(mocker, plugins): mock_find_spec = mocker.patch("importlib.util.find_spec") path = "/tmp/dbt/adapters" diff --git a/tox.ini b/tox.ini index c77b9f92272..53187161c7f 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ skipsdist = True envlist = unit,integration -[testenv:{unit,py37,py38,py39,py310,py}] +[testenv:{unit,py37,py38,py39,py310,py311,py}] description = unit testing download = true skip_install = true @@ -16,8 +16,8 @@ deps = -rdev-requirements.txt -reditable-requirements.txt -[testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py-integration}] -description = adapter plugin integration testing +[testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py311-integration,py-integration}] +description = functional testing download = true skip_install = true passenv = @@ -25,10 +25,9 @@ passenv = POSTGRES_TEST_* PYTEST_ADDOPTS commands = + {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration {envpython} -m pytest --cov=core {posargs} tests/functional {envpython} -m pytest --cov=core {posargs} tests/adapter - {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration - deps = -rdev-requirements.txt From 5b31cc4266e8dd5d2e5e94755343baf72dd866a0 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 6 Jan 2023 20:12:40 -0500 Subject: [PATCH 15/54] Remove UnsetProfileConfig (#6504) remove UnsetProfileConfig --- .../Under the Hood-20230105-104748.yaml | 7 + core/dbt/cli/main.py | 5 +- core/dbt/config/__init__.py | 2 +- core/dbt/config/runtime.py | 195 +----------------- core/dbt/task/base.py | 3 +- core/dbt/task/deps.py | 54 +---- test/unit/test_config.py | 39 ---- 7 files changed, 24 insertions(+), 281 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230105-104748.yaml diff --git a/.changes/unreleased/Under the Hood-20230105-104748.yaml b/.changes/unreleased/Under the Hood-20230105-104748.yaml new file mode 100644 index 00000000000..02f0e01988f --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230105-104748.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Remove UnsetProfileConfig +time: 2023-01-05T10:47:48.707656-05:00 +custom: + Author: MichelleArk + Issue: "5539" + PR: "6504" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 5292f795665..c1fe1742dce 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -241,10 +241,7 @@ def debug(ctx, **kwargs): @requires.project def deps(ctx, **kwargs): """Pull the most recent version of the dependencies listed in packages.yml""" - flags = ctx.obj["flags"] - project = ctx.obj["project"] - - task = DepsTask.from_project(project, flags.VARS) + task = DepsTask(ctx.obj["flags"], ctx.obj["project"]) results = task.run() success = task.interpret_results(results) diff --git a/core/dbt/config/__init__.py b/core/dbt/config/__init__.py index 5988ba589c3..1fa43bed3a5 100644 --- a/core/dbt/config/__init__.py +++ b/core/dbt/config/__init__.py @@ -1,4 +1,4 @@ # all these are just exports, they need "noqa" so flake8 will not complain. 
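# A minimal sketch of the mock-ordering pitfall noted in test_version.py above.
# Assumptions (not from the patch): pytest-mock's `mocker` fixture; Python >= 3.11,
# where mock.patch resolves its target string via importlib.import_module; the
# test name and error message below are illustrative only.
def test_patch_ordering_sketch(mocker):
    # Safe: this patch imports "dbt.version" through the real import machinery.
    mocker.patch("dbt.version.__version__", "1.0.0")
    # Patch importlib.import_module last: once it is mocked, any subsequent
    # mocker.patch call resolves its target through the mock and can
    # erroneously raise ImportError instead of importing the real module.
    mock_import = mocker.patch("importlib.import_module")
    mock_import.side_effect = ImportError("adapter plugin not installed")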
from .profile import Profile, read_user_config # noqa from .project import Project, IsFQNResource, PartialProject # noqa -from .runtime import RuntimeConfig, UnsetProfileConfig # noqa +from .runtime import RuntimeConfig # noqa diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 46f03226b57..e13de6d0ae9 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -1,7 +1,7 @@ import itertools import os from copy import deepcopy -from dataclasses import dataclass, field +from dataclasses import dataclass from pathlib import Path from typing import ( Any, @@ -415,8 +415,8 @@ def _connection_keys(self): return () -# This is used by UnsetProfileConfig, for commands which do -# not require a profile, i.e. dbt deps and clean +# This is used by commands which do not require +# a profile, i.e. dbt deps and clean class UnsetProfile(Profile): def __init__(self): self.credentials = UnsetCredentials() @@ -435,189 +435,12 @@ def __getattribute__(self, name): return Profile.__getattribute__(self, name) -# This class is used by the dbt deps and clean commands, because they don't -# require a functioning profile. -@dataclass -class UnsetProfileConfig(RuntimeConfig): - """This class acts a lot _like_ a RuntimeConfig, except if your profile is - missing, any access to profile members results in an exception. - """ - - profile_name: str = field(repr=False) - target_name: str = field(repr=False) - - def __post_init__(self): - # instead of futzing with InitVar overrides or rewriting __init__, just - # `del` the attrs we don't want users touching. - del self.profile_name - del self.target_name - # don't call super().__post_init__(), as that calls validate(), and - # this object isn't very valid - - def __getattribute__(self, name): - # Override __getattribute__ to check that the attribute isn't 'banned'. - if name in {"profile_name", "target_name"}: - raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!') - - # avoid every attribute access triggering infinite recursion - return RuntimeConfig.__getattribute__(self, name) - - def to_target_dict(self): - # re-override the poisoned profile behavior - return DictDefaultEmptyStr({}) - - def to_project_config(self, with_packages=False): - """Return a dict representation of the config that could be written to - disk with `yaml.safe_dump` to get this configuration. - - Overrides dbt.config.Project.to_project_config to omit undefined profile - attributes. - - :param with_packages bool: If True, include the serialized packages - file in the root. - :returns dict: The serialized profile. 
- """ - result = deepcopy( - { - "name": self.project_name, - "version": self.version, - "project-root": self.project_root, - "profile": "", - "model-paths": self.model_paths, - "macro-paths": self.macro_paths, - "seed-paths": self.seed_paths, - "test-paths": self.test_paths, - "analysis-paths": self.analysis_paths, - "docs-paths": self.docs_paths, - "asset-paths": self.asset_paths, - "target-path": self.target_path, - "snapshot-paths": self.snapshot_paths, - "clean-targets": self.clean_targets, - "log-path": self.log_path, - "quoting": self.quoting, - "models": self.models, - "on-run-start": self.on_run_start, - "on-run-end": self.on_run_end, - "dispatch": self.dispatch, - "seeds": self.seeds, - "snapshots": self.snapshots, - "sources": self.sources, - "tests": self.tests, - "metrics": self.metrics, - "exposures": self.exposures, - "vars": self.vars.to_dict(), - "require-dbt-version": [v.to_version_string() for v in self.dbt_version], - "config-version": self.config_version, - } - ) - if self.query_comment: - result["query-comment"] = self.query_comment.to_dict(omit_none=True) - - if with_packages: - result.update(self.packages.to_dict(omit_none=True)) - - return result - - @classmethod - def from_parts( - cls, - project: Project, - profile: Profile, - args: Any, - dependencies: Optional[Mapping[str, "RuntimeConfig"]] = None, - ) -> "RuntimeConfig": - """Instantiate a RuntimeConfig from its components. - - :param profile: Ignored. - :param project: A parsed dbt Project. - :param args: The parsed command-line arguments. - :returns RuntimeConfig: The new configuration. - """ - cli_vars: Dict[str, Any] = getattr(args, "vars", {}) - - return cls( - project_name=project.project_name, - version=project.version, - project_root=project.project_root, - model_paths=project.model_paths, - macro_paths=project.macro_paths, - seed_paths=project.seed_paths, - test_paths=project.test_paths, - analysis_paths=project.analysis_paths, - docs_paths=project.docs_paths, - asset_paths=project.asset_paths, - target_path=project.target_path, - snapshot_paths=project.snapshot_paths, - clean_targets=project.clean_targets, - log_path=project.log_path, - packages_install_path=project.packages_install_path, - quoting=project.quoting, # we never use this anyway. 
- models=project.models, - on_run_start=project.on_run_start, - on_run_end=project.on_run_end, - dispatch=project.dispatch, - seeds=project.seeds, - snapshots=project.snapshots, - dbt_version=project.dbt_version, - packages=project.packages, - manifest_selectors=project.manifest_selectors, - selectors=project.selectors, - query_comment=project.query_comment, - sources=project.sources, - tests=project.tests, - metrics=project.metrics, - exposures=project.exposures, - vars=project.vars, - config_version=project.config_version, - unrendered=project.unrendered, - project_env_vars=project.project_env_vars, - profile_env_vars=profile.profile_env_vars, - profile_name="", - target_name="", - user_config=UserConfig(), - threads=getattr(args, "threads", 1), - credentials=UnsetCredentials(), - args=args, - cli_vars=cli_vars, - dependencies=dependencies, - ) - - @classmethod - def get_profile( - cls, - project_root: str, - cli_vars: Dict[str, Any], - args: Any, - ) -> Profile: - """ - Moving all logic regarding constructing a complete UnsetProfile to this function - This way we can have a clean load_profile function to call by the new CLI and remove - all logic for UnsetProfile once we migrate to new click CLI - """ - - profile = UnsetProfile() - # The profile (for warehouse connection) is not needed, but we want - # to get the UserConfig, which is also in profiles.yml - user_config = read_user_config(project_root) - profile.user_config = user_config - profile_renderer = ProfileRenderer(cli_vars) - profile.profile_env_vars = profile_renderer.ctx_obj.env_vars - return profile - - @classmethod - def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig": - """Given arguments, read in dbt_project.yml from the current directory, - read in packages.yml if it exists, and use them to find the profile to - load. - - :param args: The arguments as parsed from the cli. - :raises DbtProjectError: If the project is invalid or missing. - :raises DbtProfileError: If the profile is invalid or missing. - :raises ValidationException: If the cli variables are invalid. - """ - project, profile = cls.collect_parts(args) - - return cls.from_parts(project=project, profile=profile, args=args) +UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE = """\ +Configuration paths exist in your dbt_project.yml file which do not \ +apply to any resources. 
+There are {} unused configuration paths: +{} +""" def _is_config_used(path, fqns): diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index ef78c8d90bf..f6d37937e99 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -98,8 +98,7 @@ def set_log_format(cls): @classmethod def from_args(cls, args): try: - # This is usually RuntimeConfig but will be UnsetProfileConfig - # for the clean or deps tasks + # This is usually RuntimeConfig config = cls.ConfigType.from_args(args) except dbt.exceptions.DbtProjectError as exc: fire_event(DbtProjectError()) diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index d03ec3748dc..425f7771995 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -1,15 +1,10 @@ -from typing import Dict, Any, Optional - -from dbt import flags +from typing import Any, Optional import dbt.utils import dbt.deprecations import dbt.exceptions -from dbt.config.profile import read_user_config -from dbt.config.runtime import load_project, UnsetProfile from dbt.config.renderer import DbtProjectYamlRenderer -from dbt.config.utils import parse_cli_vars from dbt.deps.base import downloads_directory from dbt.deps.resolver import resolve_packages from dbt.deps.registry import RegistryPinnedPackage @@ -32,20 +27,13 @@ from dbt.config import Project -from dbt.task.base import NoneConfig class DepsTask(BaseTask): - ConfigType = NoneConfig - - def __init__( - self, - args: Any, - project: Project, - cli_vars: Dict[str, Any], - ): + def __init__(self, args: Any, project: Project): + move_to_nearest_project_dir(project.project_root) super().__init__(args=args, config=None, project=project) - self.cli_vars = cli_vars + self.cli_vars = args.vars def track_package_install( self, package_name: str, source_type: str, version: Optional[str] @@ -103,36 +91,4 @@ def run(self) -> None: ) if packages_to_upgrade: fire_event(EmptyLine()) - fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade))) - - @classmethod - def _get_unset_profile(cls) -> UnsetProfile: - profile = UnsetProfile() - # The profile (for warehouse connection) is not needed, but we want - # to get the UserConfig, which is also in profiles.yml - user_config = read_user_config(flags.PROFILES_DIR) - profile.user_config = user_config - return profile - - @classmethod - def from_args(cls, args): - # deps needs to move to the project directory, as it does put files - # into the modules directory - nearest_project_dir = move_to_nearest_project_dir(args.project_dir) - - # N.B. parse_cli_vars is embedded into the param when using click. 
- # replace this with: - # cli_vars: Dict[str, Any] = getattr(args, "vars", {}) - # when this task is refactored for click - cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) - project_root: str = args.project_dir or nearest_project_dir - profile: UnsetProfile = cls._get_unset_profile() - project = load_project(project_root, args.version_check, profile, cli_vars) - - return cls(args, project, cli_vars) - - @classmethod - def from_project(cls, project: Project, cli_vars: Dict[str, Any]) -> "DepsTask": - move_to_nearest_project_dir(project.project_root) - # TODO: remove args=None once BaseTask does not require args - return cls(None, project, cli_vars) + fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade))) \ No newline at end of file diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 880a09cc7ad..456f16fade6 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -1194,45 +1194,6 @@ def test_from_args(self): self.assertEqual(config.project_name, 'my_test_project') -class TestUnsetProfileConfig(BaseConfigTest): - def setUp(self): - self.profiles_dir = '/invalid-profiles-path' - self.project_dir = '/invalid-root-path' - super().setUp() - self.default_project_data['project-root'] = self.project_dir - - def get_project(self): - return project_from_config_norender(self.default_project_data, verify_version=self.args.version_check) - - def get_profile(self): - renderer = empty_profile_renderer() - return dbt.config.Profile.from_raw_profiles( - self.default_profile_data, self.default_project_data['profile'], renderer - ) - - def test_str(self): - project = self.get_project() - profile = self.get_profile() - config = dbt.config.UnsetProfileConfig.from_parts(project, profile, {}) - - str(config) - - def test_repr(self): - project = self.get_project() - profile = self.get_profile() - config = dbt.config.UnsetProfileConfig.from_parts(project, profile, {}) - - repr(config) - - def test_to_project_config(self): - project = self.get_project() - profile = self.get_profile() - config = dbt.config.UnsetProfileConfig.from_parts(project, profile, {}) - project_config = config.to_project_config() - - self.assertEqual(project_config["profile"], "") - - class TestVariableRuntimeConfigFiles(BaseFileTest): def setUp(self): super().setUp() From 91b20b74829991f0fa4a11380db26344711555fb Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Wed, 11 Jan 2023 12:19:43 -0800 Subject: [PATCH 16/54] `dbt test` works with Click (#5556) Co-authored-by: Github Build Bot resolves https://github.com/dbt-labs/dbt-core/issues/5556 --- .../Under the Hood-20230109-151417.yaml | 6 + core/dbt/cli/main.py | 21 +- core/dbt/config/runtime.py | 6 +- .../docs/build/doctrees/environment.pickle | Bin 65160 -> 182190 bytes core/dbt/docs/build/doctrees/index.doctree | Bin 87794 -> 87716 bytes core/dbt/docs/build/html/.buildinfo | 2 +- .../docs/build/html/_sources/index.rst.txt | 32 + .../_sphinx_javascript_frameworks_compat.js | 134 - core/dbt/docs/build/html/_static/basic.css | 5 +- core/dbt/docs/build/html/_static/doctools.js | 2 +- .../docs/build/html/_static/jquery-3.6.0.js | 10881 ---------------- core/dbt/docs/build/html/_static/jquery.js | 2 - .../docs/build/html/_static/language_data.js | 2 +- .../docs/build/html/_static/searchtools.js | 2 +- .../build/html/_static/underscore-1.13.1.js | 2042 --- .../dbt/docs/build/html/_static/underscore.js | 6 - core/dbt/docs/build/html/genindex.html | 5 +- core/dbt/docs/build/html/index.html | 422 +- 
core/dbt/docs/build/html/search.html | 5 +- core/dbt/docs/build/html/searchindex.js | 2 +- core/dbt/task/deps.py | 3 +- 21 files changed, 270 insertions(+), 13310 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230109-151417.yaml delete mode 100644 core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js delete mode 100644 core/dbt/docs/build/html/_static/jquery-3.6.0.js delete mode 100644 core/dbt/docs/build/html/_static/jquery.js delete mode 100644 core/dbt/docs/build/html/_static/underscore-1.13.1.js delete mode 100644 core/dbt/docs/build/html/_static/underscore.js diff --git a/.changes/unreleased/Under the Hood-20230109-151417.yaml b/.changes/unreleased/Under the Hood-20230109-151417.yaml new file mode 100644 index 00000000000..7a30067c4d9 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230109-151417.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: '[CT-932] Implement `dbt test` in Click' +time: 2023-01-09T15:14:17.524221-08:00 +custom: + Author: aranke + Issue: "5556" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index c1fe1742dce..e47b6f58710 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -12,6 +12,7 @@ from dbt.task.clean import CleanTask from dbt.task.deps import DepsTask from dbt.task.run import RunTask +from dbt.task.test import TestTask # CLI invocation @@ -41,10 +42,11 @@ def __init__( def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]: try: dbt_ctx = cli.make_context(cli.name, args) - dbt_ctx.obj = {} - dbt_ctx.obj["project"] = self.project - dbt_ctx.obj["profile"] = self.profile - dbt_ctx.obj["manifest"] = self.manifest + dbt_ctx.obj = { + "project": self.project, + "profile": self.profile, + "manifest": self.manifest, + } return cli.invoke(dbt_ctx) except (click.NoSuchOption, click.UsageError) as e: raise dbtUsageException(e.message) @@ -206,7 +208,8 @@ def docs_serve(ctx, **kwargs): @p.version_check @requires.preflight def compile(ctx, **kwargs): - """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the target/ directory.""" + """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the + target/ directory.""" click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") return None, True @@ -449,8 +452,12 @@ def freshness(ctx, **kwargs): @requires.preflight def test(ctx, **kwargs): """Runs tests on data in deployed models. 
Run this after `dbt run`""" - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") - return None, True + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = TestTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success # Support running as a module diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index e13de6d0ae9..19806087a20 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -17,13 +17,14 @@ from dbt import flags from dbt.adapters.factory import get_include_paths, get_relation_class_by_name -from dbt.config.profile import read_user_config from dbt.config.project import load_raw_project from dbt.contracts.connection import AdapterRequiredConfig, Credentials, HasCredentials from dbt.contracts.graph.manifest import ManifestMetadata from dbt.contracts.project import Configuration, UserConfig from dbt.contracts.relation import ComponentName from dbt.dataclass_schema import ValidationError +from dbt.events.functions import warn_or_error +from dbt.events.types import UnusedResourceConfigPath from dbt.exceptions import ( ConfigContractBroken, DbtProjectError, @@ -31,10 +32,7 @@ RuntimeException, UninstalledPackagesFound, ) -from dbt.events.functions import warn_or_error -from dbt.events.types import UnusedResourceConfigPath from dbt.helper_types import DictDefaultEmptyStr, FQNPath, PathSet - from .profile import Profile from .project import Project from .renderer import DbtProjectYamlRenderer, ProfileRenderer diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle index 8aaad5e25b0b97cc741c122d6608193f2544081f..9c21016481a75defca177eac32ef285685f42bf1 100644 GIT binary patch [base85-encoded binary delta (Bin 65160 -> 182190 bytes) omitted] diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree index 3acd417b911278b24a5d2810fb56f09df6f5612c..19f1fe1cd87981168f5032123ab54f51ffbbe80c 100644 GIT binary patch [base85-encoded binary delta (Bin 87794 -> 87716 bytes) omitted]
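Taken together, the `invoke` method and the new Click `test` command make dbt callable as a library rather than only as a CLI. A minimal usage sketch follows; it assumes the runner class whose `invoke` method appears in the diff above is exported from dbt.cli.main as `dbtRunner` (the class name sits outside the hunk context shown here, so treat it as an assumption):

from dbt.cli.main import dbtRunner

# Build a runner; per the diff, a pre-loaded project, profile, and manifest
# may be passed to the constructor and are stashed on the click context's obj.
runner = dbtRunner()

# invoke() builds a click context from the argument list and returns the
# command's (results, success) tuple; unknown options raise dbtUsageException.
results, success = runner.invoke(["test"])
if not success:
    raise SystemExit(1)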
z!JDz)bviAl17;a>yckqp<-^f!{O}aB2_ag|b6CfS9I^{|2?Tf056$-i2(jXlX?D#q z&%y8j#|s!hMQOmuPd#0zFp1u5n6K9ZL)<%L@zv}18@ zr2_q7561vD?neD-6rTZtpc)Kx1rPPeC^Eus2>q`M{Q-k8&W|~{l zU5zL3Uv@Me$Isu$&WB#svuHiHaMKVHRD0bzgrzVFvrvZJhKGng=yKiy7l&bq9p)Fo zfDA0V9jgTAtG!mf+>?Y5jC638iC!$@nFot?*rGiad?dZC;EaD__!Kf z$rXYw0GJ-OK~u_mIZTB^|vsy&tH zVt#`NYtrXN6ht*;gr)m)MjXFwTuYoQf>&R{#`CCEFo3S}eg|NQM&nh*AqPW+xFODB z)ZwgwO&ahDyW__3S>s{eT>P$VtcFkz=QmzF<}$NSsbXIO2P0WQSQ0dv(9N;QTllO- z5On>0d-vArojpyzZnvDH-ky#f?Co^hd!uuo_vF2{|J1$E5OeKzYp#aC;N*xg7FwXi zY&SC*-zw@Waa>+?!%mo0+_0-}bNI={b;pO%c!>?(t8REPo^&`qDKP|Uz-T0DjriIN zArzDye{%MCJkyAz0TR3(2kEK!I8JG>Lx>B;`d2kei5`NCs_?#AYS@Apv=h$aH|AkjwRXK3+)t6VEefiZFzu>CpU3}SP*Is_; z&~y6y&RfK#e0~PgZmZo7H(zz}fh%5k@fC30RaZT)8j=!PL^=Eu+ER|IaiWgoR$(6g}M z5vO$r_!*n|8M!VzZ8kNUuj;rFtWEqvR)&KD#KjmQuACK_y0YBw% z?4pQm8V#_L2s*TL4Rpswy66h%h9Kap9T77SexvQ!LGh9}6icflcM_YuL+=I&6vu=1 z5cilAmN2kIPhj^xQyll~8zKJ@R2#O%Lh+c{>GUalW(+eMc6F*KaN0JE2yxq|N-&Ry zq5b}~xr?k$4PqJ`7?`lC)Zo|MGBe>#u-pARzfEyhF z4~aqxXmIeWblxp?u?iBh-P(V8;ztvBnNVk>zQ^Fvwsi!CWiEG2#lUYajHS^jn|yGA zmd%IUIOb_qP&{meng(xnItwnQNLZj3`@6se6OrxBe}XH*a}IXw3e?uFqypHAyHH88 zh!a;ke$8&dAPMyQq92%vZzFO0skkifMRAGTX%p;%m~ca!>4+4yB{tE4D%yeNw7^k& zF+1Dh(wgT&W+6$ABpSe51Tnj{1_q4b`X^j8_bcZ;@ugAkJ;6@t*vF6?ONuogMs%*(#=*o$3L{TBLjoiz+$io4O-?7`YF4g<786z7F3!Y|8kGUwg}D%L zIDsa`esUU54;~h_&Jea()nOvRqv?%%d%m}K-s$XZIK9AWiAz#!C3g8W)5$e)fw-z7 zZUpf$E+wFI!2n$f26{?37SjboLez`084x>7 zG6@ugI1tA3r#K+L5ke1i6M-O_Q$__W@6A%t(G}xS0Sj5c7dkK-KN`=((q-AnMXxs> zPVlfFLMrfa801e6H<2T}BZOLo>mW>o)HXy2XM~f{%33mvt-mH(7K_)`8XFoTxN-o) zwOiw<`qx9e7A-Zrp*P=n{t5VdDl){!D}PU|zkgM@Eqky5Ybme{5Fos**+M5uwLn?C zh?!V}HOl=P!p)gWnBLIVnVEv7b=2Z{CWNsmQ;IV&JiaA!oOgo#{i}2L$8|BdAPdV6 z>Uek^x6|MM%wD)T>Yp%Q1^n}zO8;ayLyp5_9P-<+loM4DqUUe}@PcVBj1_l=lWal- zBVLrz4_86=8BVY?9d!K`U$$9;x`R2J4Vi2hX>qLrvY){3$#6>z&N>3=T^RVQFb?Aa z0*sosh2vdVV?a@|E-Zwc1fTQ5VSf*&0Ow45VU-cjOhpCxiXEr$dq!jrnjAE@1su^*AmwB54@h`Mr61bUF_Ce7m*4 zQwJv@D`>$Q6q^A-fE%8{yw!npj)!wtnDSwNQHSbpO6SqzWjl-|8mk+@$#5dhazN|V zA)|RP9A_6)<7|B|+yK!%294NIO%KMiD=z-xcXpofqt#n|m@C>ixr1B`E?hNsLzlvr z$6&sP%d9*x56uk0$7*zhJNmEc!_w=9XqBzrZ@vvIHh&xbVFeSA!oc{#Y8|td{Qm1< z0kMi}CYU=xzF?Tg1&4->657Vo8(5GlWJtq}k>o)T9qjnw&Tujde{m6*ZwVOjtYcA@ z4Z4dBwFWHhLx2Sl7i%Nz%l$q4^2Sr*tD5%!nIk4{5o43NzR8Wbox7uNGBE*6?I|PoP-fMZ!2fuBAth)FEBPlj3J`5|@>jMiS zELh}ieiUxjychrZF#huw_zwbTeg^&tFHzkRv6pptY?Z$GS+KN!Rd2reA^3$iT($}R zn3jaQuXK-90=MFHj=D!+CIh<|aM{r6)O|Qs;l5viT^~4u>&Ff8Nrb$-OQY7{415Ky z&oFR+p-1wYpMz2$y?Gz}grMSn{^Rrf#{>MwgX~AI`33lyqV`MpC#AOeMLfo+Sp(EC z4UT{$tVPf2HWGz1qQcs>xRP6eHJ=KMxRrUBb63F0QVdx!Ka~Z~ZD$CoI6j^VAv3%D zr9kJkY{Se;E641LeH>PGpu}f?!Bo$A{&t>@glEpgM&6Gm{cwhlu~Zde z+)V}>qh($~skrRf~F#`f}+^{;q)bkLNF)^R*kYJlup za9Yag!e6=G?Nw@4r((5yx6*}vgL#x^gMvDIH;rAd@O=$X0=h{k-CY3(#5x{xTCGS< zAf4!Bv;%vCEVy`~5{>(nUY83JWecJ3kv+7zRvHXxq{-DlDD~1rmI@k?HPUoJBQf`% zfVCvsgDC>vY($2lU%Cl$J3AnUVMF~6=T+EcjcxN zyDPJ}cLJifIb6(LgkWoS7ha3|SSsRL{F7e$d=QMiJH9pA+BG`50~#F`<6sv4GTyoH zop_*-pn+)T5H9E`28RNQZZ?X&`b9!rcsE3RQS_7D3Sr;@{J9J62Ig5G7L5kG;ScEC zQhKx>;jR*qjsgd}D}JvA%Y{Biw1*EoJ7Ei66&3>gT|~BzHpU7!26tQTOOFMw^_Mj~ zha-@)+sC(7!O9A>zU>`_MV7S?%d_292f}sH1Pit=Ci|#R2N1eOgZo+7S!BB4O$FYa z?Se0VzUwg+FZ7@vwU1kv$b;^;4(_!S_0DYQ!%_RC$?jDU(raqG`5~2Z`iE$a5t=%&js!r z?>*+AgM-W+4_o2b92eCb^kFP)wP*~H1bv5_2}5!jVd1`BbQY z0q5Yc_;m5ihebV-ax$!5ueXWCgyk3WhEtV1Z&{U`yp*@oz6xI%CR2)RA*o(7k7Agp zK~!%M4QG0o$1ST;YhgwYI|xW!qGO~AH<_x!@wf^JBe`&N;yOs&1M83#wPEWY!I~)} zm{01(%z`x*&?9C^*nH^=7AlYhfnD~+Z&wuiVU4mg&u!?*uTVeH7|@S#n+c;z3WVf4ykNsn_$<5CvvY#@rJMFjE3^dc65M#K_9pM9un*I+|1(=pR0iaMe= zBh^PZm(=5PX6um?;RF|9tc|P;zYK8n%5Y=>^VAEl4T!dF>Np|fA-#kMDgFZ*E|+4a 
z2PIm}lo8C|)r(n(5iv`|SUChsh#)?@1}R&&Q-S|2zym8m1-XWG;EWz0`E*j7|1w*f zoFq4LNeZ20MY(Z}Zc&coR07v6VEAcJUD6^#%vQjmG2>e&F$-xTW{EU= zIPkNs61ExehwNCoq3(fVVJ!p}*>UT2tAcyJTRuz4U}G%J!7d-GRqNr7FxJVYPiC9S zL~4v(i=hR}rnsGq-d6|%(aMv}@}6^|naGLKy3YF1rGQCq=RO1L7HoSKYzB&+K&0Lz zJAv@DUoTI>r>;c9!h^Ou#V!23Y-B8A@Ia4JS(x3Sjqa~ z%3vV+S~Sah&JVY7Ka}`I)*FujGQHl2Pd;Nxm})jTN_cs>US5RX-He9I{f>t9#KCA0 zLHt_1h=m6sVu=Sm`$Y|>hFN*Ob^y+T$@x8%gRtR)J&}$RTwEEzY!hUka0vnP?t7Nt zu>sly!8JlH-01+W1f9YNfHO-FFK2?mNQ4_VckCV1HEX4k0Dx#k9h<5 zSZ0)n-0U*Gk&JCSDRnAb4R#=s<6EKp%pkK80Ej+TuZ3pRydi+)m^tG)&za_-R$9qf z$Jqps^g7Nu9x>CNEIvr+*shlj;V#?IaJjqCFp3OHw1^ z_amH3>hW~5^~ecvoC`5hMplDY02;j-gbl?w*@mYnNz01EgpGrG*$_Is3Jsgk;bFaq zg$@z1M2Ank4!Yf#(+Y0%!~hBQ|G^F}pQo8%Q3_(E_#7XDBuc)b4boCa%+@=n+sU+U zK}}iZ-UN8`Dt8S&!bFMVhX@a^)XRfV>?_c4xneVEe&S%6GJ^Sby_kh&5wk?IPdWrl zR;-H8cf-IQLJY{G?PIuWAN7id@Npz~+ASm3QYxw^tJ1pxi(Zwk zW$V;brqMCN!iV&-AaweEG+eIJ^xTq&G+jW@ep)YDp-@CCQRpwnTL9vX)Qb)#7R4M9 z`c@Pu&pWbtXwzpaS@5h2cEGTk2P0j+Ig)K$j6%IkXkhY2guw^QGMH0+v|)g&F)RCr z0E}MQPr`$V_#z{|h6vAUrZeB!LPE^9^b#W+;a|{jxg%scWdb@=Mle6D7qf5K@# z3t-I_#EiLkOsB(ZApFWsRB|B^q72l)V)RcD=A@VwMv;huQd9Sgh&5@+3Z9 z4o_`DIXwIjFWST^j&s`uekiW>NOW%cjYXuiaG%%>cC1S z3b}>ifW_bx5?Bu&5)0VNpaGj1Jft05v+a=cq$nGg)_2yOUJj7-y3>08l8#KQk{l(3 z+^Uxl;YPoThRfY(gf9`n^I^1~OdTg!;jQ0h25ZDBaiSaI-HrUkE%-=U#B7uasT_l6 zh4JA>w&e^W7QTHa?J<#KMbF2vMN^h1$@q!cTM-uDVV1?53q)(Bu_m+fzX#yxmH!&p zN}bU@KSXHws9qX`@IQ=(%Y`2e6W3pS^;J~)=>mfGGkVdA@dMFHr2n2n@DgSiB;sA^ zZ0d=~Z&T!+I3JkG?PH?Jfwq)%PJcB1hVAv}=eWp@Ka>pavBWWsu_I&7tZBo5L8 zo8xfiN-!$?(t9!4y>De4Pm+dAi9BsKw8S#Mc=w(It>$lNxxxZ#+%p~N?lo8B7(TiDlEn=2< z?fm5(A45J^QH7UHL_}BM{V%{Z272(6HN4U-*@&7BY zAb8US1g)q zjuS865bEQDgpT|4(jnaCvuL>7U1%6Zu+k!e_{(|`3ztE}5|>%d(hRA?axNq_{HEC& z<|I1KCCW3=x$Pnk0~)>R94rfWI($k=P0J5iD)IN zd^8IO&$8;wATA>dcedHK&iUsk_fKgSZVs>v;+@&l!c_7?8r(IMdj>9FOw@j%UIv73 z4xe5a^rbYHnyLbR0`3vImrP=i^e~w+Li;c1#Vk4*#4PD#w7f!w)J}^DR?c73vdSU$H7w#$qQ%*k;zd)#xcEQ2)_)_aQT=?LpB1I77@g+(u-I~8WBq* zEq^Rx_}9|hVYVJQ8E%YaSoC6=cK{&0LZmPHpbbo34?-IF-Fk@;l6)5$E|+AcFC~yO zWd!p_^kNpGM9dOVuF(582p=nif$)B>S>AI_v^jPnBH!%v{0o3euP2SdHf{LofD3!A z=qhKrfH3n_z03$N`U)C0<3<0b7qjpp#4Pcm+3OBoc5q*%a=@)L;BzCebND#C;gUab zjhotz!rozgjUJ_Z;HyY`{Mc;Wa!Q?vm6}pcR;Oblx^4pnaiUv_i~?RwCw8W?78TbIPD1ZBsMbHaQnd!kWm% z9qsTZyAGDIu!vXxjZBMr9_0Ej-`Y?kqy z@4%!{w??NAIMO z(*>mE&ew}pC>GI56l?O;0HnqyZ$ub;vRMXm+E3Ppfdp<=&Y zDny5UB^oZ5K8+6uR$4?5U#}OjkUt`p$bUIoc1RtTb0MkWqS+eeh)=5lX!o4VNo5-4_$M=>mfGwR+JCl_FY+N*{fKJ9(mwnNUB zlDT5pGu-b6P}?{bpIrIYE-oFM5jms{lx^0}Qr*^c#AK5YqoT8ZMW9G*m3(DefQWMJvRQ zXeHu*bpPne?H^q|1_CniW+~p*ITsy|r{mHcq?-VZUjNKIt4(^sOd09Aw(I3Zc|+bl^=~@Ztvijvap| z7rbpHTCJj6n})PScvv#aL(U(kn4jF7Qdrg%PXeMrTyYsYt>Y~zL>%(%PPgmjL%8MZ z(69+5zfCVc{XNu_fJ!V3D+cI966x#2*7VdPPikG!YtU5uxSN zdT9}I{v;YMmvcI=M)0N!2-*kqq7_m`v=S+wGTVv>J*NyR(l-BSwrz3_HbWdtzSZfw zfJv{Hkqu3xRY(b8=3%|e2p{_oG_2dl){lcx6O&X#EAg?F*l9!>ZY2N^eQYtyc+PXC z6VEApGr{=)Nw4F?3zLNU_#mNUPA?t8U7mo3%iV=e2nbeML=ZnqFJdt)A!3QkEN9V? 
z)L}Unk{Z6iYz=b~9p@4)SgX7q(CAg?V3}o0FAGAMFG0iQ%3OYC+0~0ys1ngiRQYIT zdF3+8C(O2W&Ob-Fe@gSpuLUf6y)%Saav>l@K$fdniqB8dMmfGOucA@ zk`b*$$)`-Ri_mk*pdxKEYqo82{soBy;a~C$+|vQlAS2x1PDb)i-03)(&&G*l2rDDN|M_PPeDD}INqdmNBRNW{ z@k3M9_&ypg?-c2fOu*72f_UvD7&fuOjEE&2<8T=aQmf%i5f(O?Wg*vPjH3q>CPQ`r znn8Mv!8J@X9)v|&L|A#EURFeJvJ(xLt2v$XAb8US1no2Rq7_3tqLnE5lu0`fdQKTs zq;0M++cr7>noj$dJQ?yLfTY*SNXmmW3MnDPRP_=gTJ?mq=!dZikt=tw6KA0(vQtCtiZ$v;EG<&vb~KEX?Y5e2>zsRzF!z+^?PdYWAkJBw{%SdXVfu@ek)CO{-l_|yEXvyro;par zaYFjwESB-XYc?6|2>Pk3IMSe#hPlDi(a^-l{jPn%xO1feW(?Y&Q5-oM-!P&#T$~?uvcX z?o^xw{P6t}{NwtzD6KRsn4`?waE5)n+j4470H2SDtz@PvD};gQ>$PTi&w1ch=7AE` zXJueah=->y1mTs79S*}{H|V7gwy>t8ZLLb3o|Y>+w|QI`dr6q+xQb7} zRIZox!Amj*!0pgP?1E?}{vA!lzsKhCu6d7MHiT;;8xq%i4F47je2OLd0E_q}3+-*n zP9*L539~iGiF}NSJd#^h*q;Y5dWAj4?^HcvFJ%^8NeQ9kOL{3068{@CTrP1M*bEx; zd_yl{Awxv`Gm+sB%+??$!>N=Exj3>~Tsx&(i^&2rY1ZT@(clE!UE?fSXmAt_o6z8P zy@-Vd5wS#r!>tvQCK%2XX}4WwS;*;_N9{$g!FVR1(c7QWwPnI0Eh4O3td|v`=I5Z{ zay6${mkHi<0YQ6EFIu5wL@QD9DYL*!=s9Ijk+wN(wrz6$HOj%Z>1z-$TRYzH?({N;=!N1J9|kSuD${;BQvPYAyMW zkH5Fwy4}KGFg1feA(`U(J&#R*{f^azSK6`TzHQgBylyX8y%swJ8J+Bu_^!~|KireX6untBG38oELuM+!I5d*5)|LnA|e!ri43RV{zhinlW5q4 z4A0SvSjZ3&OJu0{sVqXiq7Bkgb7t$ElkEiSK;z_ZPPi8V9KC(#px@fMTrUx#=ez_B zmrFK1YbGt6E+EZzNH1C;S41n3tI1Dbks6!45n<3W%V19Uqg?ob71%k|-w0Ursy^Oq zN4Sj-5+0WH@*s476b+Z_UM*s{RWD+peMBtL{&F_b5x$pmA-2h#Yfd};H2m*Ge`oXF zRY2dJW~-VLDGw?Nw&MLRfYGbc!M5VPM=uFNp?`pe%M}`Rz{~hJ%18C073xH^5_LYh z6>sIX;(f+!Tj$(!gpDE6t$1GoD1$g>b}OFvoxN;g%XU<3%6H zpv(8`Pz-bMAHh@a@iEczJ4Ea_t&pX`um*sU(?UpgMddnI7Ek#*E^ zTGo6kBc4GoBCRnt1EiVQ$job&^QZ}?diZn$`;dArFxw7r2Jx!FcjzT6(Rj^+AB6SHyHxn$Sp# z2rVztON)^6b!fO;&gqFX!J951Xl=b{g_IGkM9Qbk(lDXtltD$>#y8tGIR~5JqeSV7 z^(z6BUN0l-(xg>L31Q|ndYKVEb{iVj?PGW8MJs#^(Mo)5B^HcH!>t4WqK|i)WjyCO z(~0L4E?IvFAnA3SIL}3>j}H<${!}j=!d?Ct4VSx%T7-35V6E%mXpIGd@tuh z(k5RvTf>}0$GJocl3w2eGV_yD^`Hg1VA?Ha^XU{#UiVu6_Ce-HuB)#sGd`q5aGdW6xIjfft;YN=` z!{u(2`jR|BnmSIfK2^rHl*;^47FEz_z&IO`;QqlM24*?v#@*ivo z<;8kw5W@c@G+ZwHXqZ^W5{jc2t&l#Vl}P{5C6tw0LOEu(t#b}K%A-B$8p^GJMXz^e zzVt(S!Au$HwqCE772&TT8ZP%&)kTT7>cuR)7BNe__UGj*K<_roYtBP9MIKW66`+p- zD7~&SdCYSHy9%EOau#6AGhMpN77}OqlwN{_vwR#4n{k%U>%}ab1u;vUWzfYq(nAe; z5owLDnypz*uA8}BGwR81i2V+r8bq(z?)dedXv!sp_k8JCuLkO{nhRcPh8r$Eb7qgHeVwOm; zawu@S@J1uMhVOmqRN#LLj^_szs6imgeBCY*wq*YhiGJVM6-kfJX|R+h#@V zS_~@i&5I!ajM`TS1JTOI%<`Udp(*A&CK%2XX}9m1Wg(|y7MsKh%1Ze3ExIL4cjFNfX%Qi1 z1aKIc`>jL6&-Hr^P7zr zUV+~fZqjoAkY2ZmUtd9pj}H~$0km&;x) zT-dG`v5-C@mPmg&Tk{Fu%ej!W$u6@s%t@045(PW@p9xs>cF1JkJ|T>h5JoQ6%ZTWl zpM!?WRhsUD3EXr6L3>ayTA@%xD^cj9JNo52`VX6Jhnyd=d12Xp{x$&8>rBbceWJ|d zD3N7PFCD^(JTzSHM5%rI1ZnCx!TL(QScMBAR*4Il-DFRyZ1z@!#oNuYm~#M@uP@ro z|8@YQSNwzR=Kp=YBna969vUu}eKbTYV>kbY^r98QN3;^*Kf0TL<#zLb+H70r+;am? 
zy`_8k9|RE#bO55soVViVxGJI?7nCsP3R@nY1IKk=gZP)Ra6Vqnndsg86{k>Kl zK9pFoDj2)KE^QC?W5dT47xz?VZ)z`9;|<&UD(sI|&pt6rx6b{^DPIxz&Okv`;NAc9 z`pA9yCMP8aHdG_>zZXDr{pbE$wiyFUJkJIika@%B6QLsLeH&w=@GL2RWw{K>vTSm z;7u10w0G)7D?VNUogvf&V$a(c~Id7uWtY}y$%#F z&XOjG4-#6wqn8%pHs40WSij{g0W!TCF*=Mztov z=JC?p`$51oh?izliW5mn5kIp?lP{Sv(pO!lmjmIQ!zU1vXVFQooE#<9uuWBC9u1dw zGHS!1uNSfCXb`caqml1(9{vgSTg=uY=S1VE@4`paZwEAkxX%&V#0z2a(6zv)SZN10=m} zw47}6eR>HI&huF`T<$z-o%xsbA{MTLh$XHwTnd#CfEW*di^GT{Fg8vA0%uzdf5=(vVexmy+tj&I;Iz~@EAla z@fi7BdH837UuCu)IUSBM9Tq$kd?$d>D?#z}FA=5VciO>c*|j6du^vDzTXXXY7o(g$YBNT)RdNQ}(I*P-EZcU4UpZPkleI4xq9IPK5N^XuoE zL4vMW6e6Hz3li z#CWeBRbqUUDDfV>d#vqn=T+|f1nqwP&1;HsQHxHa!KepWl)i} zS$!5XxV|OcY}@2~Y)dj$L_Q{ek-#Q^rPtBOmQT_!q=b;OT`xJp&9qRWQ1`$iVMjrGJzajL9*?Q!(xG~XU!KTog0FYi8irt??h~hAj;w$wMBBb~V zG;BhOx9deLq=<+mQdHc~Nyt~UL0al3yv2_2 z8y_TGd`mAELi_)MhRe0D7Ck(y7qQSkB9`cXIosz5-^;m>w8{FjfjSctL|((3K1Ezn zu=j2&0MaYaWM2?rEICT(I7crXLZWA(;c|(lb_EfnspACeoL;O#oQPE-PP1EhNR`dr zim>=Bvn=KuKxEU4b`0$YM0!17upL8(^fDo|e+?Qg*M1a%EMv!zr5CMGKcbbW|Ir;o zE4O3FHQUxXADu{M5Yk;kF9$e-IB0e+#740f!tHrAn?BK$D<$33uj-{hIA_s5h%9*V zYOuieGn+UvWd!&ef7ZYUU+x}s1IIf|RpGbLaCyh5x;N{6dNGUc5iv`;M=jeeNDQFG zB9Y>sn&l+djf|qF6l~tOAFvG4hYYUfWSa(IF*!<@_<~+0L~rufXt-R_YBRBa(u-K= z84*kLEYIT(zj@<(X6upD;5gG@(Z-F{kJYWhkp;_XRTr%65Rp1glsE~}Y(@6`=nypPv zk&{f3LMGWw7*7X0dV9thr&D)4yB1XWS_W`P8IzqrIC`#Lj)Y2|jfTrry5qW~uD!2P zYq(Age!h6twiaz~c6VjAW}zN>{_J5|ohN3lfDhKNYvA)VyDRWVd>#CKn0~M3YQ6Oo z%Eo$1l-)bTz9&?%TCEDo0=`<*umbq}5PU(X-n44)AtU@W5pOnlFMwzJEOPeYlR`)B zPQ_tw4OoJ!T;CR@m4SdXl5v+6!a(%(Vza#Gd~hrCL5W*r zo$&|&)9Z}OdA*~rmlol6T{K+oc4}eDEqW0P2SUUW2O4fW9*Go(GezY5db2F#xrn=T+|KcE+_P%@&GDEXAxKS$^} zWl)i}`IOnV$@$kb8!k$B^gRHO^g0>Yw?`UdeI6OL$neX zTZs*Iq~TTq0MW+}%`%>IoT=Dx3U~9ZKS#IU#2fbr^YKB##zwfek-7Ic8ZP%1wead} zy@-X!AYzHf$g@Dh@8)}=*?Q!3IL36yCqTJZF+B^w=)DXB zXW8V;m)Ns~gqL^g*>7ZXZ8gsTu<8QUbFShX?GLvw5g=BdBraPD7_+%cbt+AB0fk6 z`Kn$*glNBlhRa2(7WV#|Uc^GOh*%=o}4c|csS1$C^VTb$%2 zkAyK&LMYh;D2&X`rqFP?Ow*wRftxNMXwTJ)R!9`lN+kN|>mTK>f81-f9deGuGaO~F ze|#<=(%aQ1TepcclcPkJSLo$KboQ5_;c_oZZQLeEQ^yI`7wW|-dpmJJma&=pq+YZ_`G{7c{6{x) zuiR$tH=AwioPUn6`G|BQ_d5ZKUgylb;*s=$nKII8-L01t;jVv(hRfYmbt3&Sy_kj5 zB4&xx{=B^Y@pEQ*%{j;ncaYN8KYkf74d5v`+hg_lV9tlFcev*+n*{jUBN_e0)gWH_ zaANa|tKisY;h+7F>&@rxH*IyDxw>2HwPEX+CA({gr&O}1JOBn^7u|#GN3Zz>_}O?$ z^IrJT{4oAg2;w{mh_kTLHAwo*`E!qqCpml8uPhVV`%0m$>djlt&%r4t`a7HV!B42* z{rt!0`43X-a2?-U*Jl)S4w`qviQ#0e=Xtoe?Z;~l4I~4Aoyd9-=hkK zkHDX!(Vso91%FKhZml|R)sA4P-w)RUC~v7B&MdfY&~XDhRk;835WWso3p%}a_4tD8 zwfk@cl9`^ca<<+xZdLhRBXok2yi3T5DOp52)de(FMtYanJ6$UQlhp{=-&( z|ElJlP@(;6!)g1t0|bHQdVXBEHiRck=6j1^XY!ckbs*sfXLz>Nh9e_^2m2ebGX2|{ zw}X+!e0Y}A>Gp!E-Kn{CJlXa<*Q++HPQ7J&Kqi8z1^sZN+Y8bs0u#MpVeT3I@a#-U z#zgasbFn?S-S{@*>a%D;=HhS)(fU9|14w8A`hN2+1b#5w1iNcpuU@U&U1)YN>>zuM zQK+Uet}KvT!Z z_7jJ?zTK<4)#Gi{#1!oCgw3DT^p&icVY}~9$9K5KHdgD7Z_T%CX#38BgH5tNmNV0P zI1b=_0K+>CX!+>O>VngP%7E{@32@&Qj(OHGXd$#!M@dbi3qXr^7C|^Ir|ks&tEXn0 zuZ5EjG+%@D4Y9skx^|bDs)CfOfejzM!q4J_8{qid`A>REz2AHTyX19v$?Nfw5y%FE z9_vA)zkfp`I2lg)OVFtuuQr^;MhpHAN?6l!JB$71Z@~GtHQ$Vv{yJWIcCBH7ynsut zMPP@)Y+K%uUbla;`9?VFBs79sb23Z@Z11)O5>IM?33)wlkBXMJ=Oza5wpQM zb!OvBSWUFi#L=b~P9%K^0AVmDVEZsrMKw7B8?W8?JYL`Q5q=)901eijcj0k}sC#0k zZq6J`&K@DBjl_3l7tIj+(9L)Oif>ESR#b9YI0}AOb9?Ac{l@Na{iV_GcK|)F>R;Or zC%cvhZUar=!+vtSN%Mn11$2>1KFBjJ5Iy%jPzo;me)w@1X#5uZjQVse39q^wJ1bnu zI1hR+dv3nv){dN4;T!LrqxQbhwsqWT_u7>UV1M@_8=!Z>+6Qd?c8}Th-IWWTRPlRV z*sX2XGdr}eNlbt375`lr|Jp0ws%oCZ-ZkhGGXwkq7GZ$WmotDyL#BvhUkZjHjy)T6 zm1gRn_w1>NTI}0b=`O+k_0DJo{)3UTU4ignPp#$bX9iyZGlt;$Ay~tJBVKib#VJ&Mnv{-aXLrcBlb>2wri9=lkoFW zX|60DIo=5E?=MS2&E43o&0$BxV$kMU_%{nfQA}(i$HTK+=vx}vt&PH!)!`cKBVok9 
z{Mu^|LNBlehXoj3q`}`y@Rsc6tdlH^fv3Pv!kJ{Cj=K)lag8O1*g%BTL%LSj?E|fO%Kn=JhGg0 zR?Sc1rH=)1v#1;%d+{q5U-N>@B!Kh@Z^o+A3J=pKVAP9s8pYu{gBow;L%X;&Q4`h^ zkIna5EgM2j2Og$N0%+0=^DcV?&!{@kYZsihv-wBJz%;mT7ovhHbp6(%XLTC|=SOB? zxU(Vdhd8g`?#t)(Bbs;(H^lK?|F#fAhMwne6@;T!2cDIJAhK;k$Qk+3hY~5jAAVEn z*Z)Ui;%;W5`TOuA)vqUV1GWoFn)ehHe7LB9QM~&~_*5!92gUnW)@esQv5x~TA3v}k z4;-j85%L$ounr*)NO(B61HN%O7r1jA>l}AFHfs2AY>s1{i?QaK=Rv?bWb^!$s)_d? zSFFFgpP49Z$DOJTIsFLT&W{m+hHhuj)|%Oo&rjr<vDS)S znULe}8N>7UR8*f$ly&^P&bW)t4BfdU6M+I({=ij_en!G9|qJmsCv{Cp-5x z0#5fW#^|1k=oXK?=Lr85Rkj4-H>sL1PWbo&#Il&_fLUXM^v1tqjPp$~=jmJLXn(sZ zXM*CaKYbejQler@{{BsfA|EKfpSgoXR7>(CisM^3FA$WJnx&wE2kGwzR^`@!n?m$Yctl$TCXBOBpY7zt zA2LS#L_~b_Fkw#U*QoL&sJ>d&gmJ2~PcU+_j~XL;BOx1B%X4)5s$2=WyQ(IP)4c}P zE{>6C@h)RjkHu8uEps%#NtGi(^Bt-tjME%F^_UJ&{@57L<1x>$rfTPj{)j3|g6I#b znlMhZ{8>8QiG0-<<69EO$y0tgjekXzH$nZER81JCo<0f2DPQ$CQ!!yYNJbAB<*5EI zRh|UZ|EX%iIMtFznmFI*8{<2U2Gh@>e{gmKbkQTSud zHy4HTex)c1-wJmgG9cWnYQi|x`55IQV?>Wd!j{J<`&2m+ihjDP3F9vQ1l7N+YQi|x(j5}7G)DGRLU!REiC3s{CFnk(YQi|(#Y^_TXN>bL z3Fl#2DA2 zBPq(Ed{mVwLH37LO&BM;a3%8}jWI3;%hI*XzgOi<(Eepr6UJ$mu4S&7H5C_%SiEpG z^K`iRkRjnuPg%P2nZj9PTu&rJr94;oxZ#ODSJi}ZU6-t9{x@TcpCK48U(viml{e9) zT&8NmIQ66awJ&VukzTiN8{>U?2YdI=&8ZvbGAd0~z69lqswRw6F3qUC!5H6@5#NRR zl-H^5>$Ur)r4`X^P%BcJ57b*V-eBv!0=4C?~oDr z8LB3X(_9i5KGzuEn`6Gq!@_5)vL^KXOjQ%ciI>JG^TsI0zNG6H?{llF5+%64LDhtD zu8TvIR~aK52TMVCL4@-EsZu6bf4Qm&BRxc@2V0eIR9-`6UI50 z1t_00MmM~42I($}PyRxcCc*TlRZSRYIv=0>A7ea^a-QYk$+uJ~5-k6VstMyPOGnA+ zU8VxTi5>Wo%HmOS67D=?C^)8S!Z_E(qvTVJ5k5l*FBl~+P^C;Le2=OLD27Yj8VUhQ!lxHE<<}vl|In| zfvO4PEg*|i-fE2Xan5>Cobns0Gzq5PtZKqI)A=~%z-gLJlw`Y8p zCXBbhI{v8-GG^{F#`-vCJ-TOZoP4}0O+xECR81IXdW_e)dceL1B%$hOshTj(vn*3pH%4^Se-ve^ zELEBW(=SmqVVvoFrs`H>Jdbig%QIEKs!EYy`4&|Z##vq;J#S5BlYd~0=*b=Ixo+5t z%-$Z6f9vKuRhbfG|Bk8&^JK@fx^%dApK-F|yXGRj&#E#d$i7$AgmJQq=aTVVrd7bC@?9BRq~$ z3ZKGUQe{ezeN@$iakBG)_uGsyJr;E(^2qzYt8yf0zDw1FahglE+JDj*-GqQ+8Y6m~5nZ_ZWTz@mLeb}|nlMgv z!S@>v7$bXSrsd(Q3Jo)tK5=Vo^6TwZ$y?muMUw^7xEak`}$l4lrW{EQ6ag}IWa zsxl@texa%f=)-xZc=4V zkbhj&gmLm^Q=qpS<9$np_oB(rZ>sVoD1VEp3FDOK!^J-{MtF8el?RTWP~}R{{U@p> zjMFV$!~BLZ#wT|8wp}lNg#PQQLS1;aNbrmVVrZ>%G#TZ(LK)TE?QcO!0u6g@Ta zm#Rbwu0N-0!Z_E`jO+J}u??eS%y!|UBi~geOYr@FR85%YJ5I%=A3)oBp{XD+#dmz; z+}vg}+fTsW0N|fOGi>fA!b6uRbI{hi80>T-t^YWb4PgN-stp8ZmgmKmjb5@TtM*4I@ zdU3w$T)6v?QTSt3O&I4~man?P7~SKX?xI}PWvVm@bziJ%!Z_3NTvgK;*OO_kWqa2a zRjCqe*HujzXIqk|daW_4qd8AOj_TJ`NfJEYs%pYG&y#-OdUh4V;vRhDhU^x6uQ9?m zN7|AFwVRzJNSSx#-pCQC^PuKRcR7T-=}KAIMapu+5W>A-BDap zx}WWPs)Py7|6SFDan7aL*fXDMD)^3OJB69pGvLlc#(Aiuj=gXEAkX z3eE~V+papDTB}!QCc>!&r)B%qz^!`re9vjs@#2ZamOBr}*y(=%_5E;~+p+89=xE(- zTaZn(0t+tN8J=EaKgK-Q4XXZ9d){sJn*o3f*B#=2-O=B53!cL?b|4)0o%xp2S-g1O zJ!<#G-Ww>x?O8o{1q+m=lZ%^>sW2Lr1@Dee<0lG_}AL$b54dM z)mj7UbRXt#gX28xZa(|R0Zey=df`Z|VRaU5-ws$k)<=IPTIO*^gehy4LMtS@Xl-!_8v=t@#o7jn=yPF*tOf`6u{qIKpap z&^y?LYE1@qyW6q?`x>Zf+lM3L@sW%1jW`FIA7__-g8j>C-uxu|ezN%~_!k`f)9goY zzKJd#)v9qr1U6iskUk0<*4V&D$-8FWj(b zcR*_P@qY7%V7GBeIN^62PUrZZmRqx0HfXeZoo97?;1A4P=nr zWlPl{I>CmL=C7bI*MnJj^-rrGu6+Ts9`tYNH~$l^Vdc%A;BP;Le{Su+x<7wwKimKW zaU6hpX)gJzaY^(4!D%})*M{R)GeRrH*4I5vSce>v<<%Pf<~STIGC6jLom9ETTD1Ez zyb~#l1Y~m)GaGKYW+`a6oeIHrYrhwcqL&EP8PyTrukJV3z}3yQ%mn;}5F?!M7g~<>q01X2@#OwRu zo%I&$oBGXjz;rlqnR~3$a;-XE;q%kN8P{_bosQM2GV|4*=fLg45%ijB)GdW$p1n}5 zxt+j1juH$vLsy9;Lq7;|ss>KL^&W1e1$cLahBXMc(6YAG?LxnWCEIBUGY6k%NXoa- ia;w!!-7<4AMaAmU zC&>yyNU~ZQB!!Ci4dMYJhai_I5&;pB8x=%B0q+|R^!FasZ})W7h|l-T zZ++`|pXYg>x4Qd9yI*nPt2^YsY(DO~UT}Wd3C?;^7<8N<$+OvGspq$D>??XGzv|xn zLVh^gX?MH%YBt#ln@Qw2EicNm>sH*bKSx^4o%ZkG^YC4e741+aqJe~>MlC0DS~qxL0Fcklvt7$kER-d3;v`C&Ns8(O2=h3r 
z1!)Jj!deTXj-5!w8H<%HT=DJducb!|N@#|Tc{X;Fx4tZkb|-ejN6Gjd;#C$+Hp7;4 zq~V9S7un>ixNUse90U2L8zA^sTe{?$!=W zUl5Qj_qzw&>qW~ksD-%wxqD1?9g7^_v16xKusaD`_D1jRgnVnQb&`Bl^%(c*M^lub`V3!T|4~p*vE$6%zH7>@H z$+W_!T!ZcYs^5i42x2}4-SGY^6?W`9JEAYtGR~MH$-E!9K z)K9D^#68x~_S>x%CLc&;gCa+;Lar0zu0EipiUysql>(yR(yN$H!lo5A#LBeLDe_F! z6~k3l?1GY#FLbXJc5JV>NtdH{2O1kDL6|sJprvymn>@5L@$Y0+Fe&z@U{ zXOLoMq-;bCLE^1>!uO7)!Ra782kX%{c-)ju+WnL&9GAUPua%=j-WE0QriefCtNM%9 z(aI@`^z0(1=fYJ}HH&U2mYw$Pvci3|`xrH%VpaqZ1C26;0#;G0`A#KNVH3p&g7iA> zHI3mjLHO?e@nJD6ta7Z*Vj(Qb>+ION+&bsA#5&rcda0|Cb`z^v*j+F1y#Qh*IaLj< zx0E}2Ko!qQnF!IEktYqKU?@AN8U%N+Lya^+h*i|g$v)>IPlN};U%-J*A_nvBLSx%k z1G-nbkJNTm#z0c(is(sI%=cCFX`Zr?s!nQNwhy75jB}TkU=LH_1Ek;8_Eg7bm_cGC zUsDei>J5022{|C_ZwPxJr!Xx`*6gz$Am~E(vF>46>t5wvt^PRVK1#Kpk?mn+BYW^V z_bAZ_39Pi+LRbnfEJGPNj0zEB=w-vk5{Fxo#i|n=GAM_E?L(wcg`#>v%2f7W+)=M7 zjh5`H>Vz-vItUD*1gD`6i#l8{Y|yfhkY@{UrXm1@0q;iI$BW4GUOf+WRegdUQ1e@q z6ENkLq?C=k`%Su|iiZ0YPEJnWtgi1RS`#lSfvQ3TkCxtQ42&cdTUCe)!V;3=_R49P z@?KoMYdJxTtQsjGVk%f>LG0L3(-pyG--N^(F8b>GuO=i0+f<>%$qAWQSI=j^nvV#8 z)Hvwd5x%;Za8Fs$KsjmMQ>mL+Z&0D$eO8#mH0h=y^eQ65-7+%Nm#5YdVhu!4h=CBc z3;!iAPHB% z6AHCjA;wAEHf*Nc31&}olcXCzb!Da53YOb(%kjOl(Q@DAlt1@OPu_Kg<2uHQqSG4hk{VO(2V#9vmrsteEv9x;h)*F|ee&el2 zZ#i+@(Hm}f_KmmS{)}9WyvKA^n?3B|?G7y2iKDAGKj-Mp=y&48b-uS8M(ahNX|e4; zkCn16TwgZcZ6R_-iO@`3`i!bByoe0M+&i=B!oe(yrv15xzO2G1;1Ps4yqbqgw$UhY|mgb{j?JRb^q){oAa>gh`?3(0o5Z3yS{VQ z@nKnHBr5iZtE$DKYBA&w`o;Ez1|vbJr?sFu$~xg;5M+yZL-wnyI_%Bmu1nY?S-w}5 z_xYw?1SIR{RK0R5%E=ZKrgW8z-`e>lJ9a3MdKxN7=v!A(o`yOYLG;fMx3dZLlHO7G zfVDx8*gS1^Eoz{H!VamLB7EythM`7s#}6Zhq>C2;yBVWyB#B^HD7+l|fGLlhGsu}F zmh0Fud@lgb^*2$wY3zn%@4l+U>o_p{#Vziu5xGCH9XP&wsFNKu5eEtTFU9RQBk39g zBpZ4Dm4zx_X4*wqF0j2F_~&8WY=LB!C>Wk6+ST zE;Z53D4|xeOGRnNJ`L}yv|6NZ@K*?zshcds&~)EXp0$XR+Dk%tq$8TcqVn!yJy-VTRQZMsZyVZ-Ejt^f5{IOZ?^TjYFt9!Pn z8(u6$P5IjF&E0^>#*6HT)~G#&hya)9B(~>cJ*EYn{Y7mv3XzTIB~*GkAcjZ^x=mLE znc3LY-+1IZ9{IQzXd_*dO#&)WuX103;h-q8?YzjY!l{?l%{DC+S)X<`p#Cj+i?cBVx9fo| zE=c|ru3_$tFhpECCY+uxtoel4MR|dU!Lo6Y!axcY8}FJ<=)!IwF@R)9;zn4fmP{JL z6U!;pn>y!*OR&yf3+A$Te^X|CEaY|y1ECve&$zGI?W*v0I0qyp%*h6&7#2m<79dcI z5Gh^9OvBUIOctv|bW;^lmqg0Hs5^kvvDdYbNDOR|K(-oo^E8r|v0GfxmE8@b(F05P zo-{ZEKPNWeEi2jR7P&xu?R%j9Sd5O}MKBPwbgB^P0p!e!8kuZZ=)El(BB)qxKXGKo z4&jOyO?AsTEAmGo63KQu!4a&wUbFqCnI5i4aAcP}(KM@@W@@|E6VNx#=>}90=}mFI zAdVg6p7MwhGHZmOsmzI$iHV$!NL>!r?=yW>?ys=K7keTOm?j-BKrj<^EFX~yoQJ{E zM=gfKSGw+=>h9jt`_JO8uAMyb(!F^z>ywPK?t%J*z4S4J>DzH?}NaR!GDo2L~gEm1tG*j+Yezv87tUiKb}T7g4Nc)6rV9ukT}uzDR4?*xWS{mo9rwvL*#UUsl8- z6Ou%~0ZDZ)7|3b3jjWCi(r#Jj#gV3fCW68^Z48#q6`^A6_-iV&a1j|u8|PhegM`31 z+bdVAhip$Iwz$|43VYGQnf609)5@p2%HJuHiCpz?e|*(M_UoX)zzK8!bAJU4(y453;3EmHa&FtlR!k0=b} z=U(*6cZ@&Yg++*!`;-$Fa36V#$kV)!PG&`KlXzPKsrLoQxNjO}-y&lzN+h)+{wf)n?59#jka3(5XdnMyiJh1q=-t8huc*o zsUUa5tRc(V&+lqRe)4YJqsAQK_t@?8dlXf~A4A=g`dq1Q!T&YrF`cIxKz%D4>iQvt zx)^l`_BT;UnN@!?RRlZii8T={tszxCfV<0vdsgE%;|>AuP-z+9ZK?=%;L~gH$09I9 zbm|T;e~69wLczR$=ppPcqcSttzlSP<9s4{sPBP7kq@I5IZ~*?B*x(<~@Jl8zbPIl! 
z%Fo<_*HJ~V--2u`=zYqN+8~7OwzBlX;?vGJf8QgzB6~T^sR6M7G3{gMq|6qe% z)3EiRL#V$(E=46hGRMXAXA?(kjGBY~{GaQ`8f1u>JA(Ad2LhrM;?OSpb|6CEvg81 z=tz)?Ud>^K`*kjyVMS{ZvW@^utsU?4av7P!$h*V~dY7@WD{<@7Rd% z=@B0od54)Fd7Hr|WcTMmvr1pY^<)C~9^ zqKaS#Uz$JpO*Z89Lrr}8I=Dyg=7*0b-$f;5V85Lzf*p2s{^Sd6#1{sMtJKYqj{hl@ znZf>9st9)MmHCs2QLdB=Gk`45f23;PboS-r?m~WwqV8>kY6iUR#8IBU4DZs%-)pIBzm6Jj3Q$?_& zE`1K=6>P9)d$22?M0q)tmBIZWRRlZk>IWl!jg9%99`k{xGKQSw8>!R`_}5cKu!Ao@ zm;W>y^tm4N>XZ3TP-z+9AEk<52VQnE|5Y~JGdC~C1qg$D^&zL>`6SCj0gY2 zz~30y(O)^n6?S8@qrGO|&_gEW5g31)arZ&02zKbD8~PjAh#S{gxuri!Wo2~y8B`JM zxU0AH>uk)8>(tCEHuWtkH3QzJieLv{xvBpyHtvhYy7WDeF=Srur!q6xUqThZj=lV` zh+kr3ZX%ZI2P0lXrDcG>iYkH~c*%+VhuCPFU8j0ZU`V%rm&(bYejil?JLLz8PMNC6~PXAiv#2rvJu~Fh^udz z*2mXFZu1^0H-mnIDuNw7zR6pBL)_3a;XlR3eeRI_?wNb~1|CBG<5XS-`HxUVup=+s z3jcyApRo}p4R-~*}%cJQS!$`7z17q+AuS3g*oQ$ZQl-$xa} z&bm58c`F-m;VcdCiU{S+RAh$w8>k}KsaHlQf5Zk|?0gjT>Hy_aRA7er$EhONnU@79 zJ0`gTLVTB!z+Dxe{3jKYA^mSu5$vRgm-N_x&kq2v@RDm(WQKZ^DuSJQrI*aupwIQ7S9{5e zRA7ereN++b%;)3N_zG^FuEWC2zKVByZiUD@z!3m@|ga&sGtn%-=K)iP5(iAC6H)urtT6bg19YkiVEU z;5whcMtwo4`ryz**8B(+n$h*EsUq09mw)!*b~fa7g}nNM54TZK8QQl}MX=K@`QXD@ zHq=uZb@?|H##Bg#bC)WDopV{H>XmGu^UK>`zifu){9@0)vqn zSL`>VX;*)J!GBR%8QlLt6~T_XbU*oMHrj=5S8gaDiJ`X{W?w-S!4A86L-{N=;tN{8 zx870SNM&Yp{1{aPJNC*Q#wl!F7#XRR^Qev3>T>) z*umG&Mfj~=3qS4ZtvQ>1z405_$nVkQHw`{y$Dc~&W_11=s3O?WmwX533)qNH9*WYS z>iaiOQ5hM`&!dW9$6Wff&v&wco|d31ANF|>m6JhzfhvL>b;Ym1{u~?Zx`e&OZ@~UE zm6gH$6I2oGxU0YF^mo{Z&neMj_`|AQU>-H zs3O>5mp!{SGs6`RPD#{NkFHH)+--(|6I2oGpi7gn*RYXRPO>r;yF%q;bo%jB5$vd| zQ?busQ;8YqC#fRXp;sni&$BU~mDU8ztJAP4m6t&tQAMyLFMq=1huL^5 zM_K)V$q!OV8Q8C&ieQIb@_@-Z*hs6rWW~2?{yLSB!Tf7f5$u@Dju!rejkAhJsty+Z zm`cfj{xnqtJLt-TgJp~9oKkNQfg z2zKorK2-Q7HqH|=3@JZSxS2}Gn2;N&BG@5saiq{;qdl)^Z+)Q9rZO|w*Qp}dvG1~@ z^})~Nyo`^hO1cwuml9D3*w;d`j84DRouieSfGx^e$iHsX6X5wG0Bzm7`G zK>sDG2zKbzTlkN$kv~{YzT#Ur{(#EP;QtU+1Uvr9&HPu`=wCWazj|B$&s1^-{y$Jf zu){Cg0$n!86_oGU#Jy@WbO1BhW@NsXDuNyPaJcwPHsG6GYI)%JbSf*O;ZLE8V8>m0 zVBBCMUON;!POJLsAuK8=!}?CD2zJ)hN0={V13s^SZ~aq^FQy_h)L%#y!A`yW4D;1& z#OsQ9^&#fZQc)S&KSdS6PP^<7^ZjhBrxoj}Q_P2{m<;LnP(`qlF5L|MH5+PexhwZT zU#5aGtp9>4f}M5s9%yc!D;m_6d+T$|I)>k7C^$tG!A`w$>+>Ww=yS@TYC03R?99s#GQX7#xb~LSCz*FpQ5o9bOclXSdspm+=LWt9;x0DQlS;Mah(l?^b5ulz zazYisPPyc~_($0=Pif5MUr6vnR7i&N4^Tz0b1uu4yo(LAj{T~VByXo;GNj*16~Rur z^2GR0*>G#mxaE=YXQ{vp^FN}BU}s)>WIVdS75?dXyYkFU7Fv1IU8+w$%1y}r=lLD!ZN%wst9)8W%<81vf-XJxT|u2 zucx9iv|mdV!A`q02lxp#+iSMV1VCP<*4?M~S9a-*5{=hEH$!XB;&u%<{??&N1N@-`#C8dU^4>x%C;`Fb|mGm7>W zUvP3Q6_(NMCs0MO^ZuWHJnFe@;P)%=U(1h2eG3(zxdXRTMX=w2@+SrFVdGv`+^e4y z+@PW|w9isSu+uKf6#h6H>uJThDogkyR7{5SE2$#bNtb2`-_3?vd(FxW;XA3I4C}X1 zMXj_#L9fmX?#Afbj1NbsBG{Ri zX9gd~23&i~>a5^lDk`JlkEV)Xr(Kd2TxCP8cbF9!!Q)g&hV!$iBG@_C<0On6OKd_? 
[remainder of base85-encoded binary delta omitted -- regenerated Sphinx doctree build artifact]

diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree
index 3acd417b911278b24a5d2810fb56f09df6f5612c..19f1fe1cd87981168f5032123ab54f51ffbbe80c 100644
GIT binary patch
literal 87716
[base85-encoded binary delta omitted -- new index.doctree, 87716 bytes]

literal 87794
[base85-encoded binary delta omitted -- previous index.doctree, 87794 bytes]

diff --git a/core/dbt/docs/build/html/.buildinfo b/core/dbt/docs/build/html/.buildinfo
index 39803f13c3e..f5b6f776592 100644
--- a/core/dbt/docs/build/html/.buildinfo
+++ b/core/dbt/docs/build/html/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 1ee31fc16e025fb98598189ba2cb5fcb
+config: e27d6c1c419f2f0af393858cdf674109
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/core/dbt/docs/build/html/_sources/index.rst.txt b/core/dbt/docs/build/html/_sources/index.rst.txt
index d5e3c6007af..93d34a648f2 100644
--- a/core/dbt/docs/build/html/_sources/index.rst.txt
+++ b/core/dbt/docs/build/html/_sources/index.rst.txt
@@ -1,4 +1,41 @@
 dbt-core's API documentation
 ============================
+How to invoke dbt commands in Python runtime
+--------------------------------------------
+
+Right now the best way to invoke a command from the Python runtime is to use the `dbtRunner` we expose:
+
+.. code-block:: python
+
+    from dbt.cli.main import dbtRunner
+    cli_args = ['run', '--project-dir', 'jaffle_shop']
+
+    # initialize the dbt runner
+    dbt = dbtRunner()
+    # run the command
+    res, success = dbt.invoke(cli_args)
+
+You can also pass pre-constructed objects into `dbtRunner`, and we will use those objects instead of loading them from disk.
+
+.. code-block:: python
+
+    # load_profile and load_project are assumed importable from dbt.config.runtime
+    from dbt.config.runtime import load_profile, load_project
+
+    # preload profile and project
+    project_dir = 'jaffle_shop'
+    profile = load_profile(project_dir, {}, 'testing-postgres')
+    project = load_project(project_dir, False, profile, {})
+
+    # initialize the runner with the pre-loaded profile and project
+    dbt = dbtRunner(profile=profile, project=project)
+    # run the command; this will use the pre-loaded profile and project instead of loading them
+    res, success = dbt.invoke(cli_args)
+
+
+For the full example code, you can refer to `core/dbt/cli/example.py`.
+
+API documentation
+-----------------

 .. dbt_click:: dbt.cli.main:cli
diff --git a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
deleted file mode 100644
index 8549469dc29..00000000000
--- a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * _sphinx_javascript_frameworks_compat.js
- * ~~~~~~~~~~
- *
- * Compatability shim for jQuery and underscores.js.
- *
- * WILL BE REMOVED IN Sphinx 6.0
- * xref RemovedInSphinx60Warning
- *
- */
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-
-/**
- * small helper function to urldecode strings
- *
- * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
- */
-jQuery.urldecode = function(x) {
-  if (!x) {
-    return x
-  }
-  return decodeURIComponent(x.replace(/\+/g, ' '));
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} diff --git a/core/dbt/docs/build/html/_static/basic.css b/core/dbt/docs/build/html/_static/basic.css index 4e9a9f1faca..7577acb1ad1 100644 --- a/core/dbt/docs/build/html/_static/basic.css +++ b/core/dbt/docs/build/html/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -324,6 +324,7 @@ aside.sidebar { p.sidebar-title { font-weight: bold; } + nav.contents, aside.topic, div.admonition, div.topic, blockquote { @@ -331,6 +332,7 @@ div.admonition, div.topic, blockquote { } /* -- topics ---------------------------------------------------------------- */ + nav.contents, aside.topic, div.topic { @@ -606,6 +608,7 @@ ol.simple p, ul.simple p { margin-bottom: 0; } + aside.footnote > span, div.citation > span { float: left; diff --git a/core/dbt/docs/build/html/_static/doctools.js b/core/dbt/docs/build/html/_static/doctools.js index 527b876ca63..d06a71d7518 100644 --- a/core/dbt/docs/build/html/_static/doctools.js +++ b/core/dbt/docs/build/html/_static/doctools.js @@ -4,7 +4,7 @@ * * Base JavaScript utilities for all Sphinx HTML documentation. * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ diff --git a/core/dbt/docs/build/html/_static/jquery-3.6.0.js b/core/dbt/docs/build/html/_static/jquery-3.6.0.js deleted file mode 100644 index fc6c299b73e..00000000000 --- a/core/dbt/docs/build/html/_static/jquery-3.6.0.js +++ /dev/null @@ -1,10881 +0,0 @@ -/*! - * jQuery JavaScript Library v3.6.0 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright OpenJS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2021-03-02T17:08Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). 
- // We don't want to classify *any* DOM node as a function. - // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 - // Plus for old WebKit, typeof returns "function" for HTML collections - // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) - return typeof obj === "function" && typeof obj.nodeType !== "number" && - typeof obj.item !== "function"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.6.0", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. 
- each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && 
proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. - globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. 
- support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), - function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - } ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! - * Sizzle CSS Selector Engine v2.3.6 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2021-02-16 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem && elem.namespaceURI, - docElem = elem && ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
-			input = document.createElement( "input" );
-			input.setAttribute( "name", "" );
-			el.appendChild( input );
-			if ( !el.querySelectorAll( "[name='']" ).length ) {
-				rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
-					whitespace + "*(?:''|\"\")" );
-			}
-
-			// Webkit/Opera - :checked should return selected option elements
-			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
-			// IE8 throws error here and will not see later tests
-			if ( !el.querySelectorAll( ":checked" ).length ) {
-				rbuggyQSA.push( ":checked" );
-			}
-
-			// Support: Safari 8+, iOS 8+
-			// https://bugs.webkit.org/show_bug.cgi?id=136851
-			// In-page `selector#id sibling-combinator selector` fails
-			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
-				rbuggyQSA.push( ".#.+[+~]" );
-			}
-
-			// Support: Firefox <=3.6 - 5 only
-			// Old Firefox doesn't throw on a badly-escaped identifier.
-			el.querySelectorAll( "\\\f" );
-			rbuggyQSA.push( "[\\r\\n\\f]" );
-		} );
-
-		assert( function( el ) {
-			el.innerHTML = "<a href='' disabled='disabled'></a>" +
-				"<select disabled='disabled'><option/></select>";
-
-			// Support: Windows 8 Native Apps
-			// The type and name attributes are restricted during .innerHTML assignment
-			var input = document.createElement( "input" );
-			input.setAttribute( "type", "hidden" );
-			el.appendChild( input ).setAttribute( "name", "D" );
-
-			// Support: IE8
-			// Enforce case-sensitivity of name attribute
-			if ( el.querySelectorAll( "[name=d]" ).length ) {
-				rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
-			}
-
-			// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
-			// IE8 throws error here and will not see later tests
-			if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
-				rbuggyQSA.push( ":enabled", ":disabled" );
-			}
-
-			// Support: IE9-11+
-			// IE's :disabled selector does not pick up the children of disabled fieldsets
-			docElem.appendChild( el ).disabled = true;
-			if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
-				rbuggyQSA.push( ":enabled", ":disabled" );
-			}
-
-			// Support: Opera 10 - 11 only
-			// Opera 10-11 does not throw on post-comma invalid pseudos
-			el.querySelectorAll( "*,:x" );
-			rbuggyQSA.push( ",.*:" );
-		} );
-	}
-
-	if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
-		docElem.webkitMatchesSelector ||
-		docElem.mozMatchesSelector ||
-		docElem.oMatchesSelector ||
-		docElem.msMatchesSelector ) ) ) ) {
-
-		assert( function( el ) {
-
-			// Check to see if it's possible to do matchesSelector
-			// on a disconnected node (IE 9)
-			support.disconnectedMatch = matches.call( el, "*" );
-
-			// This should fail with an exception
-			// Gecko does not error, returns false instead
-			matches.call( el, "[s!='']:x" );
-			rbuggyMatches.push( "!=", pseudos );
-		} );
-	}
-
-	rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
-	rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
-
-	/* Contains
-	---------------------------------------------------------------------- */
-	hasCompare = rnative.test( docElem.compareDocumentPosition );
-
-	// Element contains another
-	// Purposefully self-exclusive
-	// As in, an element does not contain itself
-	contains = hasCompare || rnative.test( docElem.contains ) ?
-		function( a, b ) {
-			var adown = a.nodeType === 9 ? a.documentElement : a,
-				bup = b && b.parentNode;
-			return a === bup || !!( bup && bup.nodeType === 1 && (
-				adown.contains ?
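
// A standalone sketch of the containment test being assembled here, assuming
// compareDocumentPosition support (bit 16 = DOCUMENT_POSITION_CONTAINED_BY);
// like the deleted code above, it checks b's *parent*, so a node is never
// considered to contain itself:
function containsNode( a, b ) {
	var bup = b && b.parentNode;
	return a === bup || !!( bup && bup.nodeType === 1 &&
		( a.compareDocumentPosition( bup ) & 16 ) );
}
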
-					adown.contains( bup ) :
-					a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16
-			) );
-		} :
-		function( a, b ) {
-			if ( b ) {
-				while ( ( b = b.parentNode ) ) {
-					if ( b === a ) {
-						return true;
-					}
-				}
-			}
-			return false;
-		};
-
-	/* Sorting
-	---------------------------------------------------------------------- */
-
-	// Document order sorting
-	sortOrder = hasCompare ?
-	function( a, b ) {
-
-		// Flag for duplicate removal
-		if ( a === b ) {
-			hasDuplicate = true;
-			return 0;
-		}
-
-		// Sort on method existence if only one input has compareDocumentPosition
-		var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
-		if ( compare ) {
-			return compare;
-		}
-
-		// Calculate position if both inputs belong to the same document
-		// Support: IE 11+, Edge 17 - 18+
-		// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-		// two documents; shallow comparisons work.
-		// eslint-disable-next-line eqeqeq
-		compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ?
-			a.compareDocumentPosition( b ) :
-
-			// Otherwise we know they are disconnected
-			1;
-
-		// Disconnected nodes
-		if ( compare & 1 ||
-			( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) {
-
-			// Choose the first element that is related to our preferred document
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			// eslint-disable-next-line eqeqeq
-			if ( a == document || a.ownerDocument == preferredDoc &&
-				contains( preferredDoc, a ) ) {
-				return -1;
-			}
-
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			// eslint-disable-next-line eqeqeq
-			if ( b == document || b.ownerDocument == preferredDoc &&
-				contains( preferredDoc, b ) ) {
-				return 1;
-			}
-
-			// Maintain original order
-			return sortInput ?
-				( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
-				0;
-		}
-
-		return compare & 4 ? -1 : 1;
-	} :
-	function( a, b ) {
-
-		// Exit early if the nodes are identical
-		if ( a === b ) {
-			hasDuplicate = true;
-			return 0;
-		}
-
-		var cur,
-			i = 0,
-			aup = a.parentNode,
-			bup = b.parentNode,
-			ap = [ a ],
-			bp = [ b ];
-
-		// Parentless nodes are either documents or disconnected
-		if ( !aup || !bup ) {
-
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			/* eslint-disable eqeqeq */
-			return a == document ? -1 :
-				b == document ? 1 :
-				/* eslint-enable eqeqeq */
-				aup ? -1 :
-				bup ? 1 :
-				sortInput ?
-					( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
-					0;
-
-		// If the nodes are siblings, we can do a quick check
-		} else if ( aup === bup ) {
-			return siblingCheck( a, b );
-		}
-
-		// Otherwise we need full lists of their ancestors for comparison
-		cur = a;
-		while ( ( cur = cur.parentNode ) ) {
-			ap.unshift( cur );
-		}
-		cur = b;
-		while ( ( cur = cur.parentNode ) ) {
-			bp.unshift( cur );
-		}
-
-		// Walk down the tree looking for a discrepancy
-		while ( ap[ i ] === bp[ i ] ) {
-			i++;
-		}
-
-		return i ?
-
-			// Do a sibling check if the nodes have a common ancestor
-			siblingCheck( ap[ i ], bp[ i ] ) :
-
-			// Otherwise nodes in our document sort first
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			/* eslint-disable eqeqeq */
-			ap[ i ] == preferredDoc ? -1 :
-			bp[ i ] == preferredDoc ? 1 :
-			/* eslint-enable eqeqeq */
-			0;
-	};
-
-	return document;
-};
-
-Sizzle.matches = function( expr, elements ) {
-	return Sizzle( expr, null, null, elements );
-};
-
-Sizzle.matchesSelector = function( elem, expr ) {
-	setDocument( elem );
-
-	if ( support.matchesSelector && documentIsHTML &&
-		!nonnativeSelectorCache[ expr + " " ] &&
-		( !rbuggyMatches || !rbuggyMatches.test( expr ) ) &&
-		( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) {
-
-		try {
-			var ret = matches.call( elem, expr );
-
-			// IE 9's matchesSelector returns false on disconnected nodes
-			if ( ret || support.disconnectedMatch ||
-
-				// As well, disconnected nodes are said to be in a document
-				// fragment in IE 9
-				elem.document && elem.document.nodeType !== 11 ) {
-				return ret;
-			}
-		} catch ( e ) {
-			nonnativeSelectorCache( expr, true );
-		}
-	}
-
-	return Sizzle( expr, document, null, [ elem ] ).length > 0;
-};
-
-Sizzle.contains = function( context, elem ) {
-
-	// Set document vars if needed
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( ( context.ownerDocument || context ) != document ) {
-		setDocument( context );
-	}
-	return contains( context, elem );
-};
-
-Sizzle.attr = function( elem, name ) {
-
-	// Set document vars if needed
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( ( elem.ownerDocument || elem ) != document ) {
-		setDocument( elem );
-	}
-
-	var fn = Expr.attrHandle[ name.toLowerCase() ],
-
-		// Don't get fooled by Object.prototype properties (jQuery #13807)
-		val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ?
-			fn( elem, name, !documentIsHTML ) :
-			undefined;
-
-	return val !== undefined ?
-		val :
-		support.attributes || !documentIsHTML ?
-			elem.getAttribute( name ) :
-			( val = elem.getAttributeNode( name ) ) && val.specified ?
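
// A sketch of the attribute fallback pattern in Sizzle.attr above: prefer
// getAttribute(), then consult getAttributeNode() on engines where
// getAttribute() reports properties instead of attributes. Assumes a DOM
// Element as input; not the removed implementation itself:
function readAttr( elem, name ) {
	var node,
		val = elem.getAttribute( name );
	if ( val == null && elem.getAttributeNode ) {
		node = elem.getAttributeNode( name );
		val = node && node.specified ? node.value : null;
	}
	return val;
}
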
-				val.value :
-				null;
-};
-
-Sizzle.escape = function( sel ) {
-	return ( sel + "" ).replace( rcssescape, fcssescape );
-};
-
-Sizzle.error = function( msg ) {
-	throw new Error( "Syntax error, unrecognized expression: " + msg );
-};
-
-/**
- * Document sorting and removing duplicates
- * @param {ArrayLike} results
- */
-Sizzle.uniqueSort = function( results ) {
-	var elem,
-		duplicates = [],
-		j = 0,
-		i = 0;
-
-	// Unless we *know* we can detect duplicates, assume their presence
-	hasDuplicate = !support.detectDuplicates;
-	sortInput = !support.sortStable && results.slice( 0 );
-	results.sort( sortOrder );
-
-	if ( hasDuplicate ) {
-		while ( ( elem = results[ i++ ] ) ) {
-			if ( elem === results[ i ] ) {
-				j = duplicates.push( i );
-			}
-		}
-		while ( j-- ) {
-			results.splice( duplicates[ j ], 1 );
-		}
-	}
-
-	// Clear input after sorting to release objects
-	// See https://github.com/jquery/sizzle/pull/225
-	sortInput = null;
-
-	return results;
-};
-
-/**
- * Utility function for retrieving the text value of an array of DOM nodes
- * @param {Array|Element} elem
- */
-getText = Sizzle.getText = function( elem ) {
-	var node,
-		ret = "",
-		i = 0,
-		nodeType = elem.nodeType;
-
-	if ( !nodeType ) {
-
-		// If no nodeType, this is expected to be an array
-		while ( ( node = elem[ i++ ] ) ) {
-
-			// Do not traverse comment nodes
-			ret += getText( node );
-		}
-	} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
-
-		// Use textContent for elements
-		// innerText usage removed for consistency of new lines (jQuery #11153)
-		if ( typeof elem.textContent === "string" ) {
-			return elem.textContent;
-		} else {
-
-			// Traverse its children
-			for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
-				ret += getText( elem );
-			}
-		}
-	} else if ( nodeType === 3 || nodeType === 4 ) {
-		return elem.nodeValue;
-	}
-
-	// Do not include comment or processing instruction nodes
-
-	return ret;
-};
-
-Expr = Sizzle.selectors = {
-
-	// Can be adjusted by the user
-	cacheLength: 50,
-
-	createPseudo: markFunction,
-
-	match: matchExpr,
-
-	attrHandle: {},
-
-	find: {},
-
-	relative: {
-		">": { dir: "parentNode", first: true },
-		" ": { dir: "parentNode" },
-		"+": { dir: "previousSibling", first: true },
-		"~": { dir: "previousSibling" }
-	},
-
-	preFilter: {
-		"ATTR": function( match ) {
-			match[ 1 ] = match[ 1 ].replace( runescape, funescape );
-
-			// Move the given value to match[3] whether quoted or unquoted
-			match[ 3 ] = ( match[ 3 ] || match[ 4 ] ||
-				match[ 5 ] || "" ).replace( runescape, funescape );
-
-			if ( match[ 2 ] === "~=" ) {
-				match[ 3 ] = " " + match[ 3 ] + " ";
-			}
-
-			return match.slice( 0, 4 );
-		},
-
-		"CHILD": function( match ) {
-
-			/* matches from matchExpr["CHILD"]
-				1 type (only|nth|...)
-				2 what (child|of-type)
-				3 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
-				4 xn-component of xn+y argument ([+-]?\d*n|)
-				5 sign of xn-component
-				6 x of xn-component
-				7 sign of y-component
-				8 y of y-component
-			*/
-			match[ 1 ] = match[ 1 ].toLowerCase();
-
-			if ( match[ 1 ].slice( 0, 3 ) === "nth" ) {
-
-				// nth-* requires argument
-				if ( !match[ 3 ] ) {
-					Sizzle.error( match[ 0 ] );
-				}
-
-				// numeric x and y parameters for Expr.filter.CHILD
-				// remember that false/true cast respectively to 0/1
-				match[ 4 ] = +( match[ 4 ] ?
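
// The uniqueSort implementation above sorts first, then splices out adjacent
// duplicates by recorded index, working from the end so earlier indexes stay
// valid. The same idea on a plain array, assuming any comparator; a sketch,
// not the removed code:
function uniqueSorted( arr, compare ) {
	var i,
		dup = [];
	arr.sort( compare );
	for ( i = 0; i < arr.length - 1; i++ ) {
		if ( arr[ i ] === arr[ i + 1 ] ) {
			dup.push( i + 1 );
		}
	}
	while ( dup.length ) {
		arr.splice( dup.pop(), 1 );
	}
	return arr;
}
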
-					match[ 5 ] + ( match[ 6 ] || 1 ) :
-					2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) );
-				match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" );
-
-			// other types prohibit arguments
-			} else if ( match[ 3 ] ) {
-				Sizzle.error( match[ 0 ] );
-			}
-
-			return match;
-		},
-
-		"PSEUDO": function( match ) {
-			var excess,
-				unquoted = !match[ 6 ] && match[ 2 ];
-
-			if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) {
-				return null;
-			}
-
-			// Accept quoted arguments as-is
-			if ( match[ 3 ] ) {
-				match[ 2 ] = match[ 4 ] || match[ 5 ] || "";
-
-			// Strip excess characters from unquoted arguments
-			} else if ( unquoted && rpseudo.test( unquoted ) &&
-
-				// Get excess from tokenize (recursively)
-				( excess = tokenize( unquoted, true ) ) &&
-
-				// advance to the next closing parenthesis
-				( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) {
-
-				// excess is a negative index
-				match[ 0 ] = match[ 0 ].slice( 0, excess );
-				match[ 2 ] = unquoted.slice( 0, excess );
-			}
-
-			// Return only captures needed by the pseudo filter method (type and argument)
-			return match.slice( 0, 3 );
-		}
-	},
-
-	filter: {
-
-		"TAG": function( nodeNameSelector ) {
-			var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase();
-			return nodeNameSelector === "*" ?
-				function() {
-					return true;
-				} :
-				function( elem ) {
-					return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
-				};
-		},
-
-		"CLASS": function( className ) {
-			var pattern = classCache[ className + " " ];
-
-			return pattern ||
-				( pattern = new RegExp( "(^|" + whitespace +
-					")" + className + "(" + whitespace + "|$)" ) ) && classCache(
-						className, function( elem ) {
-							return pattern.test(
-								typeof elem.className === "string" && elem.className ||
-								typeof elem.getAttribute !== "undefined" &&
-									elem.getAttribute( "class" ) ||
-								""
-							);
-						} );
-		},
-
-		"ATTR": function( name, operator, check ) {
-			return function( elem ) {
-				var result = Sizzle.attr( elem, name );
-
-				if ( result == null ) {
-					return operator === "!=";
-				}
-				if ( !operator ) {
-					return true;
-				}
-
-				result += "";
-
-				/* eslint-disable max-len */
-
-				return operator === "=" ? result === check :
-					operator === "!=" ? result !== check :
-					operator === "^=" ? check && result.indexOf( check ) === 0 :
-					operator === "*=" ? check && result.indexOf( check ) > -1 :
-					operator === "$=" ? check && result.slice( -check.length ) === check :
-					operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 :
-					operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" :
-					false;
-				/* eslint-enable max-len */
-
-			};
-		},
-
-		"CHILD": function( type, what, _argument, first, last ) {
-			var simple = type.slice( 0, 3 ) !== "nth",
-				forward = type.slice( -4 ) !== "last",
-				ofType = what === "of-type";
-
-			return first === 1 && last === 0 ?
-
-				// Shortcut for :nth-*(n)
-				function( elem ) {
-					return !!elem.parentNode;
-				} :
-
-				function( elem, _context, xml ) {
-					var cache, uniqueCache, outerCache, node, nodeIndex, start,
-						dir = simple !== forward ? "nextSibling" : "previousSibling",
-						parent = elem.parentNode,
-						name = ofType && elem.nodeName.toLowerCase(),
-						useCache = !xml && !ofType,
-						diff = false;
-
-					if ( parent ) {
-
-						// :(first|last|only)-(child|of-type)
-						if ( simple ) {
-							while ( dir ) {
-								node = elem;
-								while ( ( node = node[ dir ] ) ) {
-									if ( ofType ?
-										node.nodeName.toLowerCase() === name :
-										node.nodeType === 1 ) {
-
-										return false;
-									}
-								}
-
-								// Reverse direction for :only-* (if we haven't yet done so)
-								start = dir = type === "only" && !start && "nextSibling";
-							}
-							return true;
-						}
-
-						start = [ forward ? parent.firstChild : parent.lastChild ];
-
-						// non-xml :nth-child(...) stores cache data on `parent`
-						if ( forward && useCache ) {
-
-							// Seek `elem` from a previously-cached index
-
-							// ...in a gzip-friendly way
-							node = parent;
-							outerCache = node[ expando ] || ( node[ expando ] = {} );
-
-							// Support: IE <9 only
-							// Defend against cloned attroperties (jQuery gh-1709)
-							uniqueCache = outerCache[ node.uniqueID ] ||
-								( outerCache[ node.uniqueID ] = {} );
-
-							cache = uniqueCache[ type ] || [];
-							nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
-							diff = nodeIndex && cache[ 2 ];
-							node = nodeIndex && parent.childNodes[ nodeIndex ];
-
-							while ( ( node = ++nodeIndex && node && node[ dir ] ||
-
-								// Fallback to seeking `elem` from the start
-								( diff = nodeIndex = 0 ) || start.pop() ) ) {
-
-								// When found, cache indexes on `parent` and break
-								if ( node.nodeType === 1 && ++diff && node === elem ) {
-									uniqueCache[ type ] = [ dirruns, nodeIndex, diff ];
-									break;
-								}
-							}
-
-						} else {
-
-							// Use previously-cached element index if available
-							if ( useCache ) {
-
-								// ...in a gzip-friendly way
-								node = elem;
-								outerCache = node[ expando ] || ( node[ expando ] = {} );
-
-								// Support: IE <9 only
-								// Defend against cloned attroperties (jQuery gh-1709)
-								uniqueCache = outerCache[ node.uniqueID ] ||
-									( outerCache[ node.uniqueID ] = {} );
-
-								cache = uniqueCache[ type ] || [];
-								nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
-								diff = nodeIndex;
-							}
-
-							// xml :nth-child(...)
-							// or :nth-last-child(...) or :nth(-last)?-of-type(...)
-							if ( diff === false ) {
-
-								// Use the same loop as above to seek `elem` from the start
-								while ( ( node = ++nodeIndex && node && node[ dir ] ||
-									( diff = nodeIndex = 0 ) || start.pop() ) ) {
-
-									if ( ( ofType ?
-										node.nodeName.toLowerCase() === name :
-										node.nodeType === 1 ) &&
-										++diff ) {
-
-										// Cache the index of each encountered element
-										if ( useCache ) {
-											outerCache = node[ expando ] ||
-												( node[ expando ] = {} );
-
-											// Support: IE <9 only
-											// Defend against cloned attroperties (jQuery gh-1709)
-											uniqueCache = outerCache[ node.uniqueID ] ||
-												( outerCache[ node.uniqueID ] = {} );
-
-											uniqueCache[ type ] = [ dirruns, diff ];
-										}
-
-										if ( node === elem ) {
-											break;
-										}
-									}
-								}
-							}
-						}
-					}
-
-					// Incorporate the offset, then check against cycle size
-					diff -= last;
-					return diff === first || ( diff % first === 0 && diff / first >= 0 );
-				}
-			};
-		},
-
-		"PSEUDO": function( pseudo, argument ) {
-
-			// pseudo-class names are case-insensitive
-			// http://www.w3.org/TR/selectors/#pseudo-classes
-			// Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
-			// Remember that setFilters inherits from pseudos
-			var args,
-				fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
-					Sizzle.error( "unsupported pseudo: " + pseudo );
-
-			// The user may use createPseudo to indicate that
-			// arguments are needed to create the filter function
-			// just as Sizzle does
-			if ( fn[ expando ] ) {
-				return fn( argument );
-			}
-
-			// But maintain support for old signatures
-			if ( fn.length > 1 ) {
-				args = [ pseudo, pseudo, "", argument ];
-				return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
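
// The CHILD filter above boils :nth-*(an+b) down to the final test
// `diff === first || ( diff % first === 0 && diff / first >= 0 )`,
// where `first` is a and `last` is b. Standalone form of that arithmetic,
// assuming a 1-based sibling position:
function matchesNth( position, a, b ) {
	var diff = position - b;
	return a ?
		diff % a === 0 && diff / a >= 0 :
		diff === 0;
}
// matchesNth( 3, 2, 1 ) -> true   (:nth-child(2n+1) matches position 3)
// matchesNth( 4, 2, 1 ) -> false
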
-					markFunction( function( seed, matches ) {
-						var idx,
-							matched = fn( seed, argument ),
-							i = matched.length;
-						while ( i-- ) {
-							idx = indexOf( seed, matched[ i ] );
-							seed[ idx ] = !( matches[ idx ] = matched[ i ] );
-						}
-					} ) :
-					function( elem ) {
-						return fn( elem, 0, args );
-					};
-			}
-
-			return fn;
-		}
-	},
-
-	pseudos: {
-
-		// Potentially complex pseudos
-		"not": markFunction( function( selector ) {
-
-			// Trim the selector passed to compile
-			// to avoid treating leading and trailing
-			// spaces as combinators
-			var input = [],
-				results = [],
-				matcher = compile( selector.replace( rtrim, "$1" ) );
-
-			return matcher[ expando ] ?
-				markFunction( function( seed, matches, _context, xml ) {
-					var elem,
-						unmatched = matcher( seed, null, xml, [] ),
-						i = seed.length;
-
-					// Match elements unmatched by `matcher`
-					while ( i-- ) {
-						if ( ( elem = unmatched[ i ] ) ) {
-							seed[ i ] = !( matches[ i ] = elem );
-						}
-					}
-				} ) :
-				function( elem, _context, xml ) {
-					input[ 0 ] = elem;
-					matcher( input, null, xml, results );
-
-					// Don't keep the element (issue #299)
-					input[ 0 ] = null;
-					return !results.pop();
-				};
-		} ),
-
-		"has": markFunction( function( selector ) {
-			return function( elem ) {
-				return Sizzle( selector, elem ).length > 0;
-			};
-		} ),
-
-		"contains": markFunction( function( text ) {
-			text = text.replace( runescape, funescape );
-			return function( elem ) {
-				return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1;
-			};
-		} ),
-
-		// "Whether an element is represented by a :lang() selector
-		// is based solely on the element's language value
-		// being equal to the identifier C,
-		// or beginning with the identifier C immediately followed by "-".
-		// The matching of C against the element's language value is performed case-insensitively.
-		// The identifier C does not have to be a valid language name."
-		// http://www.w3.org/TR/selectors/#lang-pseudo
-		"lang": markFunction( function( lang ) {
-
-			// lang value must be a valid identifier
-			if ( !ridentifier.test( lang || "" ) ) {
-				Sizzle.error( "unsupported lang: " + lang );
-			}
-			lang = lang.replace( runescape, funescape ).toLowerCase();
-			return function( elem ) {
-				var elemLang;
-				do {
-					if ( ( elemLang = documentIsHTML ?
-						elem.lang :
-						elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) {
-
-						elemLang = elemLang.toLowerCase();
-						return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0;
-					}
-				} while ( ( elem = elem.parentNode ) && elem.nodeType === 1 );
-				return false;
-			};
-		} ),
-
-		// Miscellaneous
-		"target": function( elem ) {
-			var hash = window.location && window.location.hash;
-			return hash && hash.slice( 1 ) === elem.id;
-		},
-
-		"root": function( elem ) {
-			return elem === docElem;
-		},
-
-		"focus": function( elem ) {
-			return elem === document.activeElement &&
-				( !document.hasFocus || document.hasFocus() ) &&
-				!!( elem.type || elem.href || ~elem.tabIndex );
-		},
-
-		// Boolean properties
-		"enabled": createDisabledPseudo( false ),
-		"disabled": createDisabledPseudo( true ),
-
-		"checked": function( elem ) {
-
-			// In CSS3, :checked should return both checked and selected elements
-			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
-			var nodeName = elem.nodeName.toLowerCase();
-			return ( nodeName === "input" && !!elem.checked ) ||
-				( nodeName === "option" && !!elem.selected );
-		},
-
-		"selected": function( elem ) {
-
-			// Accessing this property makes selected-by-default
-			// options in Safari work properly
-			if ( elem.parentNode ) {
-				// eslint-disable-next-line no-unused-expressions
-				elem.parentNode.selectedIndex;
-			}
-
-			return elem.selected === true;
-		},
-
-		// Contents
-		"empty": function( elem ) {
-
-			// http://www.w3.org/TR/selectors/#empty-pseudo
-			// :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5),
-			//   but not by others (comment: 8; processing instruction: 7; etc.)
-			// nodeType < 6 works because attributes (2) do not appear as children
-			for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
-				if ( elem.nodeType < 6 ) {
-					return false;
-				}
-			}
-			return true;
-		},
-
-		"parent": function( elem ) {
-			return !Expr.pseudos[ "empty" ]( elem );
-		},
-
-		// Element/input types
-		"header": function( elem ) {
-			return rheader.test( elem.nodeName );
-		},
-
-		"input": function( elem ) {
-			return rinputs.test( elem.nodeName );
-		},
-
-		"button": function( elem ) {
-			var name = elem.nodeName.toLowerCase();
-			return name === "input" && elem.type === "button" || name === "button";
-		},
-
-		"text": function( elem ) {
-			var attr;
-			return elem.nodeName.toLowerCase() === "input" &&
-				elem.type === "text" &&
-
-				// Support: IE<8
-				// New HTML5 attribute values (e.g., "search") appear with elem.type === "text"
-				( ( attr = elem.getAttribute( "type" ) ) == null ||
-					attr.toLowerCase() === "text" );
-		},
-
-		// Position-in-collection
-		"first": createPositionalPseudo( function() {
-			return [ 0 ];
-		} ),
-
-		"last": createPositionalPseudo( function( _matchIndexes, length ) {
-			return [ length - 1 ];
-		} ),
-
-		"eq": createPositionalPseudo( function( _matchIndexes, length, argument ) {
-			return [ argument < 0 ? argument + length : argument ];
-		} ),
-
-		"even": createPositionalPseudo( function( matchIndexes, length ) {
-			var i = 0;
-			for ( ; i < length; i += 2 ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} ),
-
-		"odd": createPositionalPseudo( function( matchIndexes, length ) {
-			var i = 1;
-			for ( ; i < length; i += 2 ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} ),
-
-		"lt": createPositionalPseudo( function( matchIndexes, length, argument ) {
-			var i = argument < 0 ?
-				argument + length :
-				argument > length ?
-					length :
-					argument;
-			for ( ; --i >= 0; ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} ),
-
-		"gt": createPositionalPseudo( function( matchIndexes, length, argument ) {
-			var i = argument < 0 ? argument + length : argument;
-			for ( ; ++i < length; ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} )
-	}
-};
-
-Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ];
-
-// Add button/input type pseudos
-for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) {
-	Expr.pseudos[ i ] = createInputPseudo( i );
-}
-for ( i in { submit: true, reset: true } ) {
-	Expr.pseudos[ i ] = createButtonPseudo( i );
-}
-
-// Easy API for creating new setFilters
-function setFilters() {}
-setFilters.prototype = Expr.filters = Expr.pseudos;
-Expr.setFilters = new setFilters();
-
-tokenize = Sizzle.tokenize = function( selector, parseOnly ) {
-	var matched, match, tokens, type,
-		soFar, groups, preFilters,
-		cached = tokenCache[ selector + " " ];
-
-	if ( cached ) {
-		return parseOnly ? 0 : cached.slice( 0 );
-	}
-
-	soFar = selector;
-	groups = [];
-	preFilters = Expr.preFilter;
-
-	while ( soFar ) {
-
-		// Comma and first run
-		if ( !matched || ( match = rcomma.exec( soFar ) ) ) {
-			if ( match ) {
-
-				// Don't consume trailing commas as valid
-				soFar = soFar.slice( match[ 0 ].length ) || soFar;
-			}
-			groups.push( ( tokens = [] ) );
-		}
-
-		matched = false;
-
-		// Combinators
-		if ( ( match = rcombinators.exec( soFar ) ) ) {
-			matched = match.shift();
-			tokens.push( {
-				value: matched,
-
-				// Cast descendant combinators to space
-				type: match[ 0 ].replace( rtrim, " " )
-			} );
-			soFar = soFar.slice( matched.length );
-		}
-
-		// Filters
-		for ( type in Expr.filter ) {
-			if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] ||
-				( match = preFilters[ type ]( match ) ) ) ) {
-				matched = match.shift();
-				tokens.push( {
-					value: matched,
-					type: type,
-					matches: match
-				} );
-				soFar = soFar.slice( matched.length );
-			}
-		}
-
-		if ( !matched ) {
-			break;
-		}
-	}
-
-	// Return the length of the invalid excess
-	// if we're just parsing
-	// Otherwise, throw an error or return tokens
-	return parseOnly ?
-		soFar.length :
-		soFar ?
-			Sizzle.error( selector ) :
-
-			// Cache the tokens
-			tokenCache( selector, groups ).slice( 0 );
-};
-
-function toSelector( tokens ) {
-	var i = 0,
-		len = tokens.length,
-		selector = "";
-	for ( ; i < len; i++ ) {
-		selector += tokens[ i ].value;
-	}
-	return selector;
-}
-
-function addCombinator( matcher, combinator, base ) {
-	var dir = combinator.dir,
-		skip = combinator.next,
-		key = skip || dir,
-		checkNonElements = base && key === "parentNode",
-		doneName = done++;
-
-	return combinator.first ?
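
// tokenize() above peels commas, combinators, and filter matches off the
// selector left to right. A toy version handling only tag names and the
// ">"/descendant combinators, to show the shape of the loop (assumed,
// much-reduced grammar compared with the real matchExpr table):
function toyTokenize( selector ) {
	var m,
		groups = [],
		tokens = [],
		soFar = selector.replace( /\s+$/, "" );

	groups.push( tokens );
	while ( soFar ) {
		if ( ( m = /^\s*,\s*/.exec( soFar ) ) ) {
			groups.push( ( tokens = [] ) );     // start a new comma group
		} else if ( ( m = /^\s*(>|\s)\s*/.exec( soFar ) ) ) {
			tokens.push( { type: m[ 1 ] === ">" ? ">" : " " } );
		} else if ( ( m = /^[a-zA-Z*][\w-]*/.exec( soFar ) ) ) {
			tokens.push( { type: "TAG", value: m[ 0 ] } );
		} else {
			throw new Error( "Syntax error, unrecognized expression: " + soFar );
		}
		soFar = soFar.slice( m[ 0 ].length );
	}
	return groups;
}
// toyTokenize( "ul > li, ol li" ) -> two token groups
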
-
-		// Check against closest ancestor/preceding element
-		function( elem, context, xml ) {
-			while ( ( elem = elem[ dir ] ) ) {
-				if ( elem.nodeType === 1 || checkNonElements ) {
-					return matcher( elem, context, xml );
-				}
-			}
-			return false;
-		} :
-
-		// Check against all ancestor/preceding elements
-		function( elem, context, xml ) {
-			var oldCache, uniqueCache, outerCache,
-				newCache = [ dirruns, doneName ];
-
-			// We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching
-			if ( xml ) {
-				while ( ( elem = elem[ dir ] ) ) {
-					if ( elem.nodeType === 1 || checkNonElements ) {
-						if ( matcher( elem, context, xml ) ) {
-							return true;
-						}
-					}
-				}
-			} else {
-				while ( ( elem = elem[ dir ] ) ) {
-					if ( elem.nodeType === 1 || checkNonElements ) {
-						outerCache = elem[ expando ] || ( elem[ expando ] = {} );
-
-						// Support: IE <9 only
-						// Defend against cloned attroperties (jQuery gh-1709)
-						uniqueCache = outerCache[ elem.uniqueID ] ||
-							( outerCache[ elem.uniqueID ] = {} );
-
-						if ( skip && skip === elem.nodeName.toLowerCase() ) {
-							elem = elem[ dir ] || elem;
-						} else if ( ( oldCache = uniqueCache[ key ] ) &&
-							oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) {
-
-							// Assign to newCache so results back-propagate to previous elements
-							return ( newCache[ 2 ] = oldCache[ 2 ] );
-						} else {
-
-							// Reuse newcache so results back-propagate to previous elements
-							uniqueCache[ key ] = newCache;
-
-							// A match means we're done; a fail means we have to keep checking
-							if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) {
-								return true;
-							}
-						}
-					}
-				}
-			}
-			return false;
-		};
-}
-
-function elementMatcher( matchers ) {
-	return matchers.length > 1 ?
-		function( elem, context, xml ) {
-			var i = matchers.length;
-			while ( i-- ) {
-				if ( !matchers[ i ]( elem, context, xml ) ) {
-					return false;
-				}
-			}
-			return true;
-		} :
-		matchers[ 0 ];
-}
-
-function multipleContexts( selector, contexts, results ) {
-	var i = 0,
-		len = contexts.length;
-	for ( ; i < len; i++ ) {
-		Sizzle( selector, contexts[ i ], results );
-	}
-	return results;
-}
-
-function condense( unmatched, map, filter, context, xml ) {
-	var elem,
-		newUnmatched = [],
-		i = 0,
-		len = unmatched.length,
-		mapped = map != null;
-
-	for ( ; i < len; i++ ) {
-		if ( ( elem = unmatched[ i ] ) ) {
-			if ( !filter || filter( elem, context, xml ) ) {
-				newUnmatched.push( elem );
-				if ( mapped ) {
-					map.push( i );
-				}
-			}
-		}
-	}
-
-	return newUnmatched;
-}
-
-function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
-	if ( postFilter && !postFilter[ expando ] ) {
-		postFilter = setMatcher( postFilter );
-	}
-	if ( postFinder && !postFinder[ expando ] ) {
-		postFinder = setMatcher( postFinder, postSelector );
-	}
-	return markFunction( function( seed, results, context, xml ) {
-		var temp, i, elem,
-			preMap = [],
-			postMap = [],
-			preexisting = results.length,
-
-			// Get initial elements from seed or context
-			elems = seed || multipleContexts(
-				selector || "*",
-				context.nodeType ? [ context ] : context,
-				[]
-			),
-
-			// Prefilter to get matcher input, preserving a map for seed-results synchronization
-			matcherIn = preFilter && ( seed || !selector ) ?
-				condense( elems, preMap, preFilter, context, xml ) :
-				elems,
-
-			matcherOut = matcher ?
-
-				// If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
-				postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
-
-					// ...intermediate processing is necessary
-					[] :
-
-					// ...otherwise use results directly
-					results :
-				matcherIn;
-
-		// Find primary matches
-		if ( matcher ) {
-			matcher( matcherIn, matcherOut, context, xml );
-		}
-
-		// Apply postFilter
-		if ( postFilter ) {
-			temp = condense( matcherOut, postMap );
-			postFilter( temp, [], context, xml );
-
-			// Un-match failing elements by moving them back to matcherIn
-			i = temp.length;
-			while ( i-- ) {
-				if ( ( elem = temp[ i ] ) ) {
-					matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem );
-				}
-			}
-		}
-
-		if ( seed ) {
-			if ( postFinder || preFilter ) {
-				if ( postFinder ) {
-
-					// Get the final matcherOut by condensing this intermediate into postFinder contexts
-					temp = [];
-					i = matcherOut.length;
-					while ( i-- ) {
-						if ( ( elem = matcherOut[ i ] ) ) {
-
-							// Restore matcherIn since elem is not yet a final match
-							temp.push( ( matcherIn[ i ] = elem ) );
-						}
-					}
-					postFinder( null, ( matcherOut = [] ), temp, xml );
-				}
-
-				// Move matched elements from seed to results to keep them synchronized
-				i = matcherOut.length;
-				while ( i-- ) {
-					if ( ( elem = matcherOut[ i ] ) &&
-						( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) {
-
-						seed[ temp ] = !( results[ temp ] = elem );
-					}
-				}
-			}
-
-		// Add elements to results, through postFinder if defined
-		} else {
-			matcherOut = condense(
-				matcherOut === results ?
-					matcherOut.splice( preexisting, matcherOut.length ) :
-					matcherOut
-			);
-			if ( postFinder ) {
-				postFinder( null, results, matcherOut, xml );
-			} else {
-				push.apply( results, matcherOut );
-			}
-		}
-	} );
-}
-
-function matcherFromTokens( tokens ) {
-	var checkContext, matcher, j,
-		len = tokens.length,
-		leadingRelative = Expr.relative[ tokens[ 0 ].type ],
-		implicitRelative = leadingRelative || Expr.relative[ " " ],
-		i = leadingRelative ? 1 : 0,
-
-		// The foundational matcher ensures that elements are reachable from top-level context(s)
-		matchContext = addCombinator( function( elem ) {
-			return elem === checkContext;
-		}, implicitRelative, true ),
-		matchAnyContext = addCombinator( function( elem ) {
-			return indexOf( checkContext, elem ) > -1;
-		}, implicitRelative, true ),
-		matchers = [ function( elem, context, xml ) {
-			var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
-				( checkContext = context ).nodeType ?
-					matchContext( elem, context, xml ) :
-					matchAnyContext( elem, context, xml ) );
-
-			// Avoid hanging onto element (issue #299)
-			checkContext = null;
-			return ret;
-		} ];
-
-	for ( ; i < len; i++ ) {
-		if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) {
-			matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
-		} else {
-			matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches );
-
-			// Return special upon seeing a positional matcher
-			if ( matcher[ expando ] ) {
-
-				// Find the next relative operator (if any) for proper handling
-				j = ++i;
-				for ( ; j < len; j++ ) {
-					if ( Expr.relative[ tokens[ j ].type ] ) {
-						break;
-					}
-				}
-				return setMatcher(
-					i > 1 && elementMatcher( matchers ),
-					i > 1 && toSelector(
-
-						// If the preceding token was a descendant combinator, insert an implicit any-element `*`
-						tokens
-							.slice( 0, i - 1 )
-							.concat( { value: tokens[ i - 2 ].type === " " ?
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
-			if ( bySet && i !== matchedCount ) {
-				j = 0;
-				while ( ( matcher = setMatchers[ j++ ] ) ) {
-					matcher( unmatched, setMatched, context, xml );
-				}
-
-				if ( seed ) {
-
-					// Reintegrate element matches to eliminate the need for sorting
-					if ( matchedCount > 0 ) {
-						while ( i-- ) {
-							if ( !( unmatched[ i ] || setMatched[ i ] ) ) {
-								setMatched[ i ] = pop.call( results );
-							}
-						}
-					}
-
-					// Discard index placeholder values to get only actual matches
-					setMatched = condense( setMatched );
-				}
-
-				// Add matches to results
-				push.apply( results, setMatched );
-
-				// Seedless set matches succeeding multiple successful matchers stipulate sorting
-				if ( outermost && !seed && setMatched.length > 0 &&
-					( matchedCount + setMatchers.length ) > 1 ) {
-
-					Sizzle.uniqueSort( results );
-				}
-			}
-
-			// Override manipulation of globals by nested matchers
-			if ( outermost ) {
-				dirruns = dirrunsUnique;
-				outermostContext = contextBackup;
-			}
-
-			return unmatched;
-		};
-
-	return bySet ?
-		markFunction( superMatcher ) :
-		superMatcher;
-}
-
-compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) {
-	var i,
-		setMatchers = [],
-		elementMatchers = [],
-		cached = compilerCache[ selector + " " ];
-
-	if ( !cached ) {
-
-		// Generate a function of recursive functions that can be used to check each element
-		if ( !match ) {
-			match = tokenize( selector );
-		}
-		i = match.length;
-		while ( i-- ) {
-			cached = matcherFromTokens( match[ i ] );
-			if ( cached[ expando ] ) {
-				setMatchers.push( cached );
-			} else {
-				elementMatchers.push( cached );
-			}
-		}
-
-		// Cache the compiled function
-		cached = compilerCache(
-			selector,
-			matcherFromGroupMatchers( elementMatchers, setMatchers )
-		);
-
-		// Save selector and tokenization
-		cached.selector = selector;
-	}
-	return cached;
-};
-
-/**
- * A low-level selection function that works with Sizzle's compiled
- *  selector functions
- * @param {String|Function} selector A selector or a pre-compiled
- *  selector function built with Sizzle.compile
- * @param {Element} context
- * @param {Array} [results]
- * @param {Array} [seed] A set of elements to match against
- */
-select = Sizzle.select = function( selector, context, results, seed ) {
-	var i, tokens, token, type, find,
-		compiled = typeof selector === "function" && selector,
-		match = !seed && tokenize( ( selector = compiled.selector || selector ) );
-
-	results = results || [];
-
-	// Try to minimize operations if there is only one selector in the list and no seed
-	// (the latter of which guarantees us context)
-	if ( match.length === 1 ) {
-
-		// Reduce context if the leading compound selector is an ID
-		tokens = match[ 0 ] = match[ 0 ].slice( 0 );
-		if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" &&
-			context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) {
-
-			context = ( Expr.find[ "ID" ]( token.matches[ 0 ]
-				.replace( runescape, funescape ), context ) || [] )[ 0 ];
-			if ( !context ) {
-				return results;
-
-			// Precompiled matchers will still verify ancestry, so step up a level
-			} else if ( compiled ) {
-				context = context.parentNode;
-			}
-
-			selector = selector.slice( tokens.shift().value.length );
-		}
-
-		// Fetch a seed set for right-to-left matching
-		i = matchExpr[ "needsContext" ].test( selector ) ?
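
// compilerCache above is used both as a map (compilerCache[ selector + " " ])
// and as a setter function. A sketch of a capped cache with that dual shape,
// modeled on how the lookups above behave (the eviction policy shown here is
// an assumption: oldest key dropped once the cap is exceeded):
function createBoundedCache( max ) {
	var keys = [];
	function cache( key, value ) {
		// The trailing space keeps keys clear of Object.prototype members
		if ( keys.push( key + " " ) > max ) {
			delete cache[ keys.shift() ];
		}
		return ( cache[ key + " " ] = value );
	}
	return cache;
}
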
-			0 :
-			tokens.length;
-		while ( i-- ) {
-			token = tokens[ i ];
-
-			// Abort if we hit a combinator
-			if ( Expr.relative[ ( type = token.type ) ] ) {
-				break;
-			}
-			if ( ( find = Expr.find[ type ] ) ) {
-
-				// Search, expanding context for leading sibling combinators
-				if ( ( seed = find(
-					token.matches[ 0 ].replace( runescape, funescape ),
-					rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
-						context
-				) ) ) {
-
-					// If seed is empty or no tokens remain, we can return early
-					tokens.splice( i, 1 );
-					selector = seed.length && toSelector( tokens );
-					if ( !selector ) {
-						push.apply( results, seed );
-						return results;
-					}
-
-					break;
-				}
-			}
-		}
-	}
-
-	// Compile and execute a filtering function if one is not provided
-	// Provide `match` to avoid retokenization if we modified the selector above
-	( compiled || compile( selector, match ) )(
-		seed,
-		context,
-		!documentIsHTML,
-		results,
-		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
-	);
-	return results;
-};
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert( function( el ) {
-
-	// Should return 1, but returns 4 (following)
-	return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
-} );
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert( function( el ) {
-	el.innerHTML = "<a href='#'></a>";
-	return el.firstChild.getAttribute( "href" ) === "#";
-} ) ) {
-	addHandle( "type|href|height|width", function( elem, name, isXML ) {
-		if ( !isXML ) {
-			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
-		}
-	} );
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert( function( el ) {
-	el.innerHTML = "<input/>";
-	el.firstChild.setAttribute( "value", "" );
-	return el.firstChild.getAttribute( "value" ) === "";
-} ) ) {
-	addHandle( "value", function( elem, _name, isXML ) {
-		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
-			return elem.defaultValue;
-		}
-	} );
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert( function( el ) {
-	return el.getAttribute( "disabled" ) == null;
-} ) ) {
-	addHandle( booleans, function( elem, name, isXML ) {
-		var val;
-		if ( !isXML ) {
-			return elem[ name ] === true ? name.toLowerCase() :
-				( val = elem.getAttributeNode( name ) ) && val.specified ?
-					val.value :
-					null;
-		}
-	} );
-}
-
-return Sizzle;
-
-} )( window );
-
-
-
-jQuery.find = Sizzle;
-jQuery.expr = Sizzle.selectors;
-
-// Deprecated
-jQuery.expr[ ":" ] = jQuery.expr.pseudos;
-jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort;
-jQuery.text = Sizzle.getText;
-jQuery.isXMLDoc = Sizzle.isXML;
-jQuery.contains = Sizzle.contains;
-jQuery.escapeSelector = Sizzle.escape;
-
-
-
-
-var dir = function( elem, dir, until ) {
-	var matched = [],
-		truncate = until !== undefined;
-
-	while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) {
-		if ( elem.nodeType === 1 ) {
-			if ( truncate && jQuery( elem ).is( until ) ) {
-				break;
-			}
-			matched.push( elem );
-		}
-	}
-	return matched;
-};
-
-
-var siblings = function( n, elem ) {
-	var matched = [];
-
-	for ( ; n; n = n.nextSibling ) {
-		if ( n.nodeType === 1 && n !== elem ) {
-			matched.push( n );
-		}
-	}
-
-	return matched;
-};
-
-
-var rneedsContext = jQuery.expr.match.needsContext;
-
-
-
-function nodeName( elem, name ) {
-
-	return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
-
-}
-var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i );
-
-
-
-// Implement the identical functionality for filter and not
-function winnow( elements, qualifier, not ) {
-	if ( isFunction( qualifier ) ) {
-		return jQuery.grep( elements, function( elem, i ) {
-			return !!qualifier.call( elem, i, elem ) !== not;
-		} );
-	}
-
-	// Single element
-	if ( qualifier.nodeType ) {
-		return jQuery.grep( elements, function( elem ) {
-			return ( elem === qualifier ) !== not;
-		} );
-	}
-
-	// Arraylike of elements (jQuery, arguments, Array)
-	if ( typeof qualifier !== "string" ) {
-		return jQuery.grep( elements, function( elem ) {
-			return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
-		} );
-	}
-
-	// Filtered directly for both simple and complex selectors
-	return jQuery.filter( qualifier, elements, not );
-}
-
-jQuery.filter = function( expr, elems, not ) {
-	var elem = elems[ 0 ];
-
-	if ( not ) {
-		expr = ":not(" + expr + ")";
-	}
-
-	if ( elems.length === 1 && elem.nodeType === 1 ) {
-		return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
-	}
-
-	return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
-		return elem.nodeType === 1;
-	} ) );
-};
-
-jQuery.fn.extend( {
-	find: function( selector ) {
-		var i, ret,
-			len = this.length,
-			self = this;
-
-		if ( typeof selector !== "string" ) {
-			return this.pushStack( jQuery( selector ).filter( function() {
-				for ( i = 0; i < len; i++ ) {
-					if ( jQuery.contains( self[ i ], this ) ) {
-						return true;
-					}
-				}
-			} ) );
-		}
-
-		ret = this.pushStack( [] );
-
-		for ( i = 0; i < len; i++ ) {
-			jQuery.find( selector, self[ i ], ret );
-		}
-
-		return len > 1 ? jQuery.uniqueSort( ret ) : ret;
-	},
-	filter: function( selector ) {
-		return this.pushStack( winnow( this, selector || [], false ) );
-	},
-	not: function( selector ) {
-		return this.pushStack( winnow( this, selector || [], true ) );
-	},
-	is: function( selector ) {
-		return !!winnow(
-			this,
-
-			// If this is a positional/relative selector, check membership in the returned set
-			// so $("p:first").is("p:last") won't return true for a doc with two "p".
-			typeof selector === "string" && rneedsContext.test( selector ) ?
-				jQuery( selector ) :
-				selector || [],
-			false
-		).length;
-	}
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
-	// A simple way to check for HTML strings
-	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
-	// Strict HTML recognition (#11290: must start with <)
-	// Shortcut simple #id case for speed
-	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
-	init = jQuery.fn.init = function( selector, context, root ) {
-		var match, elem;
-
-		// HANDLE: $(""), $(null), $(undefined), $(false)
-		if ( !selector ) {
-			return this;
-		}
-
-		// Method init() accepts an alternate rootjQuery
-		// so migrate can support jQuery.sub (gh-2101)
-		root = root || rootjQuery;
-
-		// Handle HTML strings
-		if ( typeof selector === "string" ) {
-			if ( selector[ 0 ] === "<" &&
-				selector[ selector.length - 1 ] === ">" &&
-				selector.length >= 3 ) {
-
-				// Assume that strings that start and end with <> are HTML and skip the regex check
-				match = [ null, selector, null ];
-
-			} else {
-				match = rquickExpr.exec( selector );
-			}
-
-			// Match html or make sure no context is specified for #id
-			if ( match && ( match[ 1 ] || !context ) ) {
-
-				// HANDLE: $(html) -> $(array)
-				if ( match[ 1 ] ) {
-					context = context instanceof jQuery ? context[ 0 ] : context;
-
-					// Option to run scripts is true for back-compat
-					// Intentionally let the error be thrown if parseHTML is not present
-					jQuery.merge( this, jQuery.parseHTML(
-						match[ 1 ],
-						context && context.nodeType ? context.ownerDocument || context : document,
-						true
-					) );
-
-					// HANDLE: $(html, props)
-					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
-						for ( match in context ) {
-
-							// Properties of context are called as methods if possible
-							if ( isFunction( this[ match ] ) ) {
-								this[ match ]( context[ match ] );
-
-							// ...and otherwise set as attributes
-							} else {
-								this.attr( match, context[ match ] );
-							}
-						}
-					}
-
-					return this;
-
-				// HANDLE: $(#id)
-				} else {
-					elem = document.getElementById( match[ 2 ] );
-
-					if ( elem ) {
-
-						// Inject the element directly into the jQuery object
-						this[ 0 ] = elem;
-						this.length = 1;
-					}
-					return this;
-				}
-
-			// HANDLE: $(expr, $(...))
-			} else if ( !context || context.jquery ) {
-				return ( context || root ).find( selector );
-
-			// HANDLE: $(expr, context)
-			// (which is just equivalent to: $(context).find(expr)
-			} else {
-				return this.constructor( context ).find( selector );
-			}
-
-		// HANDLE: $(DOMElement)
-		} else if ( selector.nodeType ) {
-			this[ 0 ] = selector;
-			this.length = 1;
-			return this;
-
-		// HANDLE: $(function)
-		// Shortcut for document ready
-		} else if ( isFunction( selector ) ) {
-			return root.ready !== undefined ?
-				root.ready( selector ) :
-
-				// Execute immediately if ready is not present
-				selector( jQuery );
-		}
-
-		return jQuery.makeArray( selector, this );
-	};
-
-// Give the init function the jQuery prototype for later instantiation
-init.prototype = jQuery.fn;
-
-// Initialize central reference
-rootjQuery = jQuery( document );
-
-
-var rparentsprev = /^(?:parents|prev(?:Until|All))/,
-
-	// Methods guaranteed to produce a unique set when starting from a unique set
-	guaranteedUnique = {
-		children: true,
-		contents: true,
-		next: true,
-		prev: true
-	};
-
-jQuery.fn.extend( {
-	has: function( target ) {
-		var targets = jQuery( target, this ),
-			l = targets.length;
-
-		return this.filter( function() {
-			var i = 0;
-			for ( ; i < l; i++ ) {
-				if ( jQuery.contains( this, targets[ i ] ) ) {
-					return true;
-				}
-			}
-		} );
-	},
-
-	closest: function( selectors, context ) {
-		var cur,
-			i = 0,
-			l = this.length,
-			matched = [],
-			targets = typeof selectors !== "string" && jQuery( selectors );
-
-		// Positional selectors never match, since there's no _selection_ context
-		if ( !rneedsContext.test( selectors ) ) {
-			for ( ; i < l; i++ ) {
-				for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) {
-
-					// Always skip document fragments
-					if ( cur.nodeType < 11 && ( targets ?
-						targets.index( cur ) > -1 :
-
-						// Don't pass non-elements to Sizzle
-						cur.nodeType === 1 &&
-							jQuery.find.matchesSelector( cur, selectors ) ) ) {
-
-						matched.push( cur );
-						break;
-					}
-				}
-			}
-		}
-
-		return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched );
-	},
-
-	// Determine the position of an element within the set
-	index: function( elem ) {
-
-		// No argument, return index in parent
-		if ( !elem ) {
-			return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1;
-		}
-
-		// Index in selector
-		if ( typeof elem === "string" ) {
-			return indexOf.call( jQuery( elem ), this[ 0 ] );
-		}
-
-		// Locate the position of the desired element
-		return indexOf.call( this,
-
-			// If it receives a jQuery object, the first element is used
-			elem.jquery ? elem[ 0 ] : elem
-		);
-	},
-
-	add: function( selector, context ) {
-		return this.pushStack(
-			jQuery.uniqueSort(
-				jQuery.merge( this.get(), jQuery( selector, context ) )
-			)
-		);
-	},
-
-	addBack: function( selector ) {
-		return this.add( selector == null ?
-			this.prevObject : this.prevObject.filter( selector )
-		);
-	}
-} );
-
-function sibling( cur, dir ) {
-	while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {}
-	return cur;
-}
-
-jQuery.each( {
-	parent: function( elem ) {
-		var parent = elem.parentNode;
-		return parent && parent.nodeType !== 11 ?
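
// dir() and sibling() above both walk a single node pointer repeatedly; the
// traversal methods below are all built from that one loop. A standalone
// equivalent of the dir() walk, assuming an element start node:
function walk( elem, prop ) {
	var matched = [];
	while ( ( elem = elem[ prop ] ) && elem.nodeType !== 9 ) {
		if ( elem.nodeType === 1 ) {
			matched.push( elem );
		}
	}
	return matched;
}
// walk( el, "parentNode" )  -> element ancestors, nearest first
// walk( el, "nextSibling" ) -> following element siblings
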
-			parent :
-			null;
-	},
-	parents: function( elem ) {
-		return dir( elem, "parentNode" );
-	},
-	parentsUntil: function( elem, _i, until ) {
-		return dir( elem, "parentNode", until );
-	},
-	next: function( elem ) {
-		return sibling( elem, "nextSibling" );
-	},
-	prev: function( elem ) {
-		return sibling( elem, "previousSibling" );
-	},
-	nextAll: function( elem ) {
-		return dir( elem, "nextSibling" );
-	},
-	prevAll: function( elem ) {
-		return dir( elem, "previousSibling" );
-	},
-	nextUntil: function( elem, _i, until ) {
-		return dir( elem, "nextSibling", until );
-	},
-	prevUntil: function( elem, _i, until ) {
-		return dir( elem, "previousSibling", until );
-	},
-	siblings: function( elem ) {
-		return siblings( ( elem.parentNode || {} ).firstChild, elem );
-	},
-	children: function( elem ) {
-		return siblings( elem.firstChild );
-	},
-	contents: function( elem ) {
-		if ( elem.contentDocument != null &&
-
-			// Support: IE 11+
-			// <object> elements with no `data` attribute has an object
-			// `contentDocument` with a `null` prototype.
-			getProto( elem.contentDocument ) ) {
-
-			return elem.contentDocument;
-		}
-
-		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
-		// Treat the template element as a regular one in browsers that
-		// don't support it.
-		if ( nodeName( elem, "template" ) ) {
-			elem = elem.content || elem;
-		}
-
-		return jQuery.merge( [], elem.childNodes );
-	}
-}, function( name, fn ) {
-	jQuery.fn[ name ] = function( until, selector ) {
-		var matched = jQuery.map( this, fn, until );
-
-		if ( name.slice( -5 ) !== "Until" ) {
-			selector = until;
-		}
-
-		if ( selector && typeof selector === "string" ) {
-			matched = jQuery.filter( selector, matched );
-		}
-
-		if ( this.length > 1 ) {
-
-			// Remove duplicates
-			if ( !guaranteedUnique[ name ] ) {
-				jQuery.uniqueSort( matched );
-			}
-
-			// Reverse order for parents* and prev-derivatives
-			if ( rparentsprev.test( name ) ) {
-				matched.reverse();
-			}
-		}
-
-		return this.pushStack( matched );
-	};
-} );
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
-	var object = {};
-	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
-		object[ flag ] = true;
-	} );
-	return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- *	options: an optional list of space-separated options that will change how
- *			the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- *	once:			will ensure the callback list can only be fired once (like a Deferred)
- *
- *	memory:			will keep track of previous values and will call any callback added
- *					after the list has been fired right away with the latest "memorized"
- *					values (like a Deferred)
- *
- *	unique:			will ensure a callback can only be added once (no duplicate in the list)
- *
- *	stopOnFalse:	interrupt callings when a callback returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
-	// Convert options from String-formatted to Object-formatted if needed
-	// (we check in cache first)
-	options = typeof options === "string" ?
-		createOptions( options ) :
-		jQuery.extend( {}, options );
-
-	var // Flag to know if list is currently firing
-		firing,
-
-		// Last fire value for non-forgettable lists
-		memory,
-
-		// Flag to know if list was already fired
-		fired,
-
-		// Flag to prevent firing
-		locked,
-
-		// Actual callback list
-		list = [],
-
-		// Queue of execution data for repeatable lists
-		queue = [],
-
-		// Index of currently firing callback (modified by add/remove as needed)
-		firingIndex = -1,
-
-		// Fire callbacks
-		fire = function() {
-
-			// Enforce single-firing
-			locked = locked || options.once;
-
-			// Execute callbacks for all pending executions,
-			// respecting firingIndex overrides and runtime changes
-			fired = firing = true;
-			for ( ; queue.length; firingIndex = -1 ) {
-				memory = queue.shift();
-				while ( ++firingIndex < list.length ) {
-
-					// Run callback and check for early termination
-					if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false &&
-						options.stopOnFalse ) {
-
-						// Jump to end and forget the data so .add doesn't re-fire
-						firingIndex = list.length;
-						memory = false;
-					}
-				}
-			}
-
-			// Forget the data if we're done with it
-			if ( !options.memory ) {
-				memory = false;
-			}
-
-			firing = false;
-
-			// Clean up if we're done firing for good
-			if ( locked ) {
-
-				// Keep an empty list if we have data for future add calls
-				if ( memory ) {
-					list = [];
-
-				// Otherwise, this object is spent
-				} else {
-					list = "";
-				}
-			}
-		},
-
-		// Actual Callbacks object
-		self = {
-
-			// Add a callback or a collection of callbacks to the list
-			add: function() {
-				if ( list ) {
-
-					// If we have memory from a past run, we should fire after adding
-					if ( memory && !firing ) {
-						firingIndex = list.length - 1;
-						queue.push( memory );
-					}
-
-					( function add( args ) {
-						jQuery.each( args, function( _, arg ) {
-							if ( isFunction( arg ) ) {
-								if ( !options.unique || !self.has( arg ) ) {
-									list.push( arg );
-								}
-							} else if ( arg && arg.length && toType( arg ) !== "string" ) {
-
-								// Inspect recursively
-								add( arg );
-							}
-						} );
-					} )( arguments );
-
-					if ( memory && !firing ) {
-						fire();
-					}
-				}
-				return this;
-			},
-
-			// Remove a callback from the list
-			remove: function() {
-				jQuery.each( arguments, function( _, arg ) {
-					var index;
-					while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
-						list.splice( index, 1 );
-
-						// Handle firing indexes
-						if ( index <= firingIndex ) {
-							firingIndex--;
-						}
-					}
-				} );
-				return this;
-			},
-
-			// Check if a given callback is in the list.
-			// If no argument is given, return whether or not list has callbacks attached.
-			has: function( fn ) {
-				return fn ?
-					jQuery.inArray( fn, list ) > -1 :
-					list.length > 0;
-			},
-
-			// Remove all callbacks from the list
-			empty: function() {
-				if ( list ) {
-					list = [];
-				}
-				return this;
-			},
-
-			// Disable .fire and .add
-			// Abort any current/pending executions
-			// Clear all callbacks and values
-			disable: function() {
-				locked = queue = [];
-				list = memory = "";
-				return this;
-			},
-			disabled: function() {
-				return !list;
-			},
-
-			// Disable .fire
-			// Also disable .add unless we have memory (since it would have no effect)
-			// Abort any pending executions
-			lock: function() {
-				locked = queue = [];
-				if ( !memory && !firing ) {
-					list = memory = "";
-				}
-				return this;
-			},
-			locked: function() {
-				return !!locked;
-			},
-
-			// Call all callbacks with the given context and arguments
-			fireWith: function( context, args ) {
-				if ( !locked ) {
-					args = args || [];
-					args = [ context, args.slice ?
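
// Usage sketch for the option flags documented above, assuming jQuery is
// loaded: "memory" replays the last fire() arguments to callbacks added
// later, and "once" locks the list after the first fire().
var readyList = jQuery.Callbacks( "once memory" );
readyList.add( function( msg ) { console.log( "first:", msg ); } );
readyList.fire( "go" );                                 // logs "first: go"
readyList.add( function( msg ) { console.log( "late:", msg ); } ); // logs "late: go" immediately
readyList.fire( "again" );                              // no-op: a "once" list only fires once
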
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
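
These callback lists are the building blocks of jQuery.Deferred below: "once memory" lists back done/fail, a plain "memory" list backs progress. A minimal sketch of the resulting behavior (hypothetical names):

    var request = jQuery.Deferred();

    request
        .done( function( data ) { console.log( "resolved:", data ); } )
        .fail( function( err ) { console.log( "rejected:", err ); } )
        .always( function() { console.log( "settled" ); } );

    console.log( request.state() ); // "pending"
    request.resolve( 42 );          // logs "resolved: 42" then "settled"

    // "memory" semantics: handlers attached after settling still run
    request.done( function( data ) { console.log( "late:", data ); } );
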
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
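
The maxDepth bookkeeping above implements the Promises/A+ rule that a handler returning a thenable defers the output promise until that thenable settles, discarding older resolution attempts. The observable effect, sketched with hypothetical values:

    jQuery.Deferred().resolve( 1 ).promise()
        .then( function( n ) {

            // Returning a thenable makes the next .then wait for it
            return jQuery.Deferred().resolve( n + 1 ).promise();
        } )
        .then( function( n ) {
            console.log( n ); // 2
        } );
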
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the primary Deferred - primary = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - primary.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( primary.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return primary.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); - } - - return primary.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
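
jQuery.when above adopts a single argument the way Promise.resolve does and aggregates several arguments the way Promise.all does. A short sketch (hypothetical deferreds):

    var a = jQuery.Deferred(),
        b = jQuery.Deferred();

    jQuery.when( a, b ).done( function( resA, resB ) {
        console.log( resA, resB ); // "one" "two", once both have resolved
    } );

    a.resolve( "one" );
    b.resolve( "two" );

    jQuery.when( "plain" ).done( function( v ) {
        console.log( v ); // "plain", adopted immediately
    } );
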
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
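
The ready machinery above is itself a Deferred: handlers attach through readyList.then, and exceptions are re-thrown asynchronously via jQuery.readyException so they surface at window.onerror instead of being swallowed. A sketch, assuming a browser page:

    jQuery( document ).ready( function() {
        console.log( "DOM is ready" );
    } );

    jQuery( document ).ready( function() {

        // Re-thrown via jQuery.readyException, so it reaches the console
        throw new Error( "not swallowed by the Deferred" );
    } );
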
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
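
Because Data.set and Data.get camelCase every key (gh-2257), dashed and camelCased lookups land on the same slot. A sketch using the public wrapper defined further below (the element is hypothetical):

    var el = document.createElement( "div" );

    jQuery.data( el, "row-index", 7 );             // stored under "rowIndex"
    console.log( jQuery.data( el, "rowIndex" ) );  // 7
    console.log( jQuery.data( el, "row-index" ) ); // 7, same camelCased slot
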
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
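
getData above coerces a data-* string only when the conversion is lossless. A sketch of what .data() (defined below) returns for various attribute values (element and keys are hypothetical):

    var el = document.createElement( "div" );
    el.setAttribute( "data-count", "3" );
    el.setAttribute( "data-active", "true" );
    el.setAttribute( "data-config", '{"a":1}' );
    el.setAttribute( "data-zip", "02134" );

    console.log( jQuery( el ).data( "count" ) );  // 3 (number)
    console.log( jQuery( el ).data( "active" ) ); // true (boolean)
    console.log( jQuery( el ).data( "config" ) ); // { a: 1 } (parsed JSON)
    console.log( jQuery( el ).data( "zip" ) );    // "02134" (+v would change it, so it stays a string)
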
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
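
Only the "fx" queue auto-dequeues and uses the "inprogress" sentinel above; custom queues must be drained explicitly. A sketch with a hypothetical queue name "steps":

    var el = jQuery( "<div>" );

    el.queue( "steps", function( next ) {
        console.log( "step 1" );
        next(); // hands control to the following entry
    } );
    el.queue( "steps", function() {
        console.log( "step 2" );
    } );

    el.dequeue( "steps" ); // runs step 1, whose next() runs step 2
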
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
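
The .promise( type ) method above resolves once the named queue has emptied, counting outstanding work through each queueHooks' "empty" list. A sketch (fadeOut/fadeIn are the animation helpers defined later in this file):

    var box = jQuery( "<div>" ).appendTo( "body" );

    box.fadeOut( 200 ).fadeIn( 200 );

    box.promise( "fx" ).done( function() {
        console.log( "fx queue drained; both animations finished" );
    } );
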
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide();
- }
-
- return this.each( function() {
- if ( isHiddenWithinTree( this ) ) {
- jQuery( this ).show();
- } else {
- jQuery( this ).hide();
- }
- } );
- }
-} );
-var rcheckableType = ( /^(?:checkbox|radio)$/i );
-
-var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
-
-var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
-
-
-
-( function() {
- var fragment = document.createDocumentFragment(),
- div = fragment.appendChild( document.createElement( "div" ) ),
- input = document.createElement( "input" );
-
- // Support: Android 4.0 - 4.3 only
- // Check state lost if the name is set (#11217)
- // Support: Windows Web Apps (WWA)
- // `name` and `type` must use .setAttribute for WWA (#14901)
- input.setAttribute( "type", "radio" );
- input.setAttribute( "checked", "checked" );
- input.setAttribute( "name", "t" );
-
- div.appendChild( input );
-
- // Support: Android <=4.1 only
- // Older WebKit doesn't clone checked state correctly in fragments
- support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
- // Support: IE <=11 only
- // Make sure textarea (and checkbox) defaultValue is properly cloned
- div.innerHTML = "<textarea>x</textarea>";
- support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
-
- // Support: IE <=9 only
- // IE <=9 replaces <option> lists with <select>s
- div.innerHTML = "<option></option>";
- support.option = !!div.lastChild;
-} )();
-
-
-// We have to close these tags to support XHTML (#13200)
-var wrapMap = {
-
- // XHTML parsers do not magically insert elements in the
- // same way that tag soup parsers do. So we cannot shorten
- // this by omitting <tbody> or other required elements.
- thead: [ 1, "<table>", "</table>" ],
- col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
- tr: [ 2, "<table><tbody>", "</tbody></table>" ],
- td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
-
- _default: [ 0, "", "" ]
-};
-
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-// Support: IE <=9 only
-if ( !support.option ) {
- wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
-}
-
-
-function getAll( context, tag ) {
-
- // Support: IE <=9 - 11 only
- // Use typeof to avoid zero-argument method invocation on host objects (#15151)
- var ret;
-
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- ret = context.getElementsByTagName( tag || "*" );
-
- } else if ( typeof context.querySelectorAll !== "undefined" ) {
- ret = context.querySelectorAll( tag || "*" );
-
- } else {
- ret = [];
- }
-
- if ( tag === undefined || tag && nodeName( context, tag ) ) {
- return jQuery.merge( [ context ], ret );
- }
-
- return ret;
-}
-
-
-// Mark scripts as having already been evaluated
-function setGlobalEval( elems, refElements ) {
- var i = 0,
- l = elems.length;
-
- for ( ; i < l; i++ ) {
- dataPriv.set(
- elems[ i ],
- "globalEval",
- !refElements || dataPriv.get( refElements[ i ], "globalEval" )
- );
- }
-}
-
-
-var rhtml = /<|&#?\w+;/;
-
-function buildFragment( elems, context, scripts, selection, ignored ) {
- var elem, tmp, tag, wrap, attached, j,
- fragment = context.createDocumentFragment(),
- nodes = [],
- i = 0,
- l = elems.length;
-
- for ( ; i < l; i++ ) {
- elem = elems[ i ];
-
- if ( elem || elem === 0 ) {
-
- // Add nodes directly
- if ( toType( elem ) === "object" ) {
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
-
- // Convert non-html into a text node
- } else if ( !rhtml.test( elem ) ) {
- nodes.push( context.createTextNode( elem ) );
-
- // Convert html into DOM nodes
- } else {
- tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
-
- // Deserialize a standard representation
- tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
- wrap = wrapMap[ tag ] || wrapMap._default;
- tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
-
- // Descend through wrappers to the right content
- j = wrap[ 0 ];
- while ( j-- ) {
- tmp = tmp.lastChild;
- }
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- jQuery.merge( nodes, tmp.childNodes );
-
- // Remember the top-level container
- tmp = fragment.firstChild;
-
- // Ensure the created nodes are orphaned (#12392)
- tmp.textContent = "";
- }
- }
- }
-
- // Remove wrapper from fragment
- fragment.textContent = "";
-
- i = 0;
- while ( ( elem = nodes[ i++ ] ) ) {
-
- // Skip elements already in the context collection (trac-4087)
- if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
- if ( ignored ) {
- ignored.push( elem );
- }
- continue;
- }
-
- attached = isAttached( elem );
-
- // Append to fragment
- tmp = getAll( fragment.appendChild( elem ), "script" );
-
- // Preserve script evaluation history
- if ( attached ) {
- setGlobalEval( tmp );
- }
-
- // Capture executables
- if ( scripts ) {
- j = 0;
- while ( ( elem = tmp[ j++ ] ) ) {
- if ( rscriptType.test( elem.type || "" ) ) {
- scripts.push( elem );
- }
- }
- }
- }
-
- return fragment;
-}
-
-
-var rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
-
-function returnTrue() {
- return true;
-}
-
-function returnFalse() {
- return false;
-}
-
-// Support: IE <=9 - 11+
-// focus() and blur() are asynchronous, except when they are no-op.
-// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
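
jQuery.event.add above attaches at most one native listener (eventHandle) per element/type and fans dispatch out to every jQuery handler, with delegated handlers stored ahead of directly bound ones. A delegation sketch (the ".row" selector is hypothetical):

    jQuery( document ).on( "click", ".row", function() {
        console.log( "delegated: .row matched at dispatch time" );
    } );

    jQuery( document ).on( "click", function() {
        console.log( "directly bound: runs after matching delegates" );
    } );
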
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
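
The namespaces parsed by rtypenamespace let groups of handlers be removed or triggered together without disturbing the rest. A sketch (the namespaces are hypothetical):

    var btn = jQuery( "<button>" );

    btn.on( "click.menu", function() { console.log( "menu" ); } );
    btn.on( "click.analytics", function() { console.log( "analytics" ); } );

    btn.trigger( "click.menu" );  // only handlers carrying the "menu" namespace
    btn.off( "click.analytics" ); // removes just that handler
    btn.off( ".menu" );           // namespace-only removal across all event types
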
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. - saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - - // Support: Chrome 86+ - // In Chrome, if an element having a focusout handler is blurred by - // clicking outside of it, it invokes the handler synchronously. 
If - // that handler calls `.remove()` on the element, the data is cleared, - // leaving `result` undefined. We need to guard against this. - return result && result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
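
A jQuery.Event can also be constructed directly, which is useful for triggering with preset properties and inspecting the outcome afterwards. A brief sketch:

    var e = jQuery.Event( "keydown", { keyCode: 13 } ); // props copied onto the event

    jQuery( document ).on( "keydown", function( ev ) {
        if ( ev.keyCode === 13 ) {
            ev.preventDefault();
        }
    } );

    jQuery( document ).trigger( e );
    console.log( e.isDefaultPrevented() ); // true
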
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - which: true -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - // Suppress native focus or blur as it's already being fired - // in leverageNative. - _default: function() { - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). 
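
Because mouseover bubbles while mouseenter does not, the mapping below routes mouseenter/mouseleave through mouseover/mouseout so they work with delegation. A usage sketch (the ".card" selector is hypothetical):

    jQuery( document ).on( "mouseenter", ".card", function() {

        // Delivered via a delegated "mouseover" whose relatedTarget is outside .card
        jQuery( this ).addClass( "hover" );
    } );
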
-jQuery.each( {
-	mouseenter: "mouseover",
-	mouseleave: "mouseout",
-	pointerenter: "pointerover",
-	pointerleave: "pointerout"
-}, function( orig, fix ) {
-	jQuery.event.special[ orig ] = {
-		delegateType: fix,
-		bindType: fix,
-
-		handle: function( event ) {
-			var ret,
-				target = this,
-				related = event.relatedTarget,
-				handleObj = event.handleObj;
-
-			// For mouseenter/leave call the handler if related is outside the target.
-			// NB: No relatedTarget if the mouse left/entered the browser window
-			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
-				event.type = handleObj.origType;
-				ret = handleObj.handler.apply( this, arguments );
-				event.type = fix;
-			}
-			return ret;
-		}
-	};
-} );
-
-jQuery.fn.extend( {
-
-	on: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn );
-	},
-	one: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn, 1 );
-	},
-	off: function( types, selector, fn ) {
-		var handleObj, type;
-		if ( types && types.preventDefault && types.handleObj ) {
-
-			// ( event ) dispatched jQuery.Event
-			handleObj = types.handleObj;
-			jQuery( types.delegateTarget ).off(
-				handleObj.namespace ?
-					handleObj.origType + "." + handleObj.namespace :
-					handleObj.origType,
-				handleObj.selector,
-				handleObj.handler
-			);
-			return this;
-		}
-		if ( typeof types === "object" ) {
-
-			// ( types-object [, selector] )
-			for ( type in types ) {
-				this.off( type, selector, types[ type ] );
-			}
-			return this;
-		}
-		if ( selector === false || typeof selector === "function" ) {
-
-			// ( types [, fn] )
-			fn = selector;
-			selector = undefined;
-		}
-		if ( fn === false ) {
-			fn = returnFalse;
-		}
-		return this.each( function() {
-			jQuery.event.remove( this, types, fn, selector );
-		} );
-	}
-} );
-
-
-var
-
-	// Support: IE <=10 - 11, Edge 12 - 13 only
-	// In IE/Edge using regex groups here causes severe slowdowns.
-	// See https://connect.microsoft.com/IE/feedback/details/1736512/
-	rnoInnerhtml = /<script|<style|<link/i,
-
-	// checked="checked" or checked
-	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-
-	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
-
-// Prefer a tbody over its parent table for containing new rows
-function manipulationTarget( elem, content ) {
-	if ( nodeName( elem, "table" ) &&
-		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
-
-		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
-	}
-
-	return elem;
-}
-
-// Replace/restore the type attribute of script elements for safe DOM manipulation
-function disableScript( elem ) {
-	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
-	return elem;
-}
-function restoreScript( elem ) {
-	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
-		elem.type = elem.type.slice( 5 );
-	} else {
-		elem.removeAttribute( "type" );
-	}
-
-	return elem;
-}
-
-function cloneCopyEvent( src, dest ) {
-	var i, l, type, pdataOld, udataOld, udataCur, events;
-
-	if ( dest.nodeType !== 1 ) {
-		return;
-	}
-
-	// 1. Copy private data: events, handlers, etc.
-	if ( dataPriv.hasData( src ) ) {
-		pdataOld = dataPriv.get( src );
-		events = pdataOld.events;
-
-		if ( events ) {
-			dataPriv.remove( dest, "handle events" );
-
-			for ( type in events ) {
-				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
-					jQuery.event.add( dest, type, events[ type ][ i ] );
-				}
-			}
-		}
-	}
-
-	// 2.
Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
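// [Editorial sketch, not part of the original diff] The collection-level
// behavior domManip() implements: a function argument is re-run per target,
// while static content is cloned for every target but the last. Assumes
// elements matching "#a, #b" exist.
jQuery( "#a, #b" ).append( function( index ) {
	return "<span>item " + index + "</span>"; // evaluated once per target element
} );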
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
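// [Editorial sketch, not part of the original diff] jQuery.clone() plus
// cloneCopyEvent() above is what lets .clone( true, true ) carry handlers and
// data over to the copy, while cleanData() prevents leaks on removal.
var $src = jQuery( "<button>hi</button>" ).on( "click", function() {} ).data( "k", 1 );
var $copy = $src.clone( true, true ); // events and data copied, descendants included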
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
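// [Editorial sketch, not part of the original diff] The html()/text() pair
// defined above: html() takes the fast innerHTML path when the markup is safe,
// while text() assigns textContent and therefore never parses HTML.
jQuery( "#box" ).html( "<em>new</em>" ); // renders emphasized text
jQuery( "#box" ).text( "<em>new</em>" ); // shows the literal markup characters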
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - 
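// [Editorial sketch, not part of the original diff] The module-internal swap()
// helper above (not a public API) applies temporary styles, runs a callback,
// and restores the old values; the width/height cssHooks below lean on it to
// measure display:none elements.
var el = document.getElementById( "box" ); // assumed element
var hiddenHeight = swap( el, { position: "absolute", visibility: "hidden", display: "block" },
	function() {
		return el.offsetHeight; // measured while temporarily displayable
	} );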
support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! - // - // Support: Firefox 70+ - // Only Firefox includes border widths - // in computed dimensions. (gh-4529) - reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; - tr.style.cssText = "border:1px solid"; - - // Support: Chrome 86+ - // Height set through cssText does not get applied. - // Computed height then comes back as 0. - tr.style.height = "1px"; - trChild.style.height = "9px"; - - // Support: Android 8 Chrome 86+ - // In our bodyBackground.html iframe, - // display for all div elements is set to "inline", - // which causes a problem only in Android 8 Chrome 86. - // Ensuring the div is display: block - // gets around this issue. - trChild.style.display = "block"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + - parseInt( trStyle.borderTopWidth, 10 ) + - parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. 
- // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
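// [Editorial sketch, not part of the original diff] finalPropName() above is
// why unprefixed names can work in .css(): a name missing from emptyStyle is
// retried with "Webkit"/"Moz"/"ms" prefixes and the winner is memoized.
jQuery( "#box" ).css( "user-select", "none" ); // may resolve to WebkitUserSelect on older engines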
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
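// [Editorial sketch, not part of the original diff] Net effect of
// getWidthOrHeight()/boxModelAdjustment() above: .width() always reports the
// content box, while .css( "width" ) mirrors the computed value, so the two
// differ for box-sizing:border-box elements.
var contentWidth = jQuery( "#box" ).width();         // number, padding/border excluded
var computedWidth = jQuery( "#box" ).css( "width" ); // string, e.g. "120px"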
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
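// [Editorial sketch, not part of the original diff] All of these normalize to
// the same options object through jQuery.speed() above ("fast" maps to 200ms
// via jQuery.fx.speeds):
jQuery( "#box" ).animate( { opacity: 0.5 }, "fast" );
jQuery( "#box" ).animate( { opacity: 0.5 }, 200, "swing" );
jQuery( "#box" ).animate( { opacity: 0.5 }, { duration: 200, easing: "swing" } );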
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
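// [Editorial sketch, not part of the original diff] The practical difference
// between the two queue-clearing methods defined above:
jQuery( "#box" ).stop( true, true ); // clear the queue, jump the running animation to its end
jQuery( "#box" ).finish();           // same, but also completes everything still queued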
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
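// [Editorial sketch, not part of the original diff] Two behaviors of the
// jQuery.attr() implementation above worth calling out:
jQuery( "#box" ).attr( "title", "hi" ); // setAttribute( "title", "hi" )
jQuery( "#box" ).attr( "title", null ); // null routes to jQuery.removeAttr()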
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml, parserErrorElem; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. - try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) {} - - parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; - if ( !xml || parserErrorElem ) { - jQuery.error( "Invalid XML: " + ( - parserErrorElem ? 
- jQuery.map( parserErrorElem.childNodes, function( el ) { - return el.textContent; - } ).join( "\n" ) : - data - ) ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ).filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ).map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - -originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - } 
); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If prev 
can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script but not if jsonp - if ( !isSuccess && - jQuery.inArray( "script", s.dataTypes ) > -1 && - jQuery.inArray( "json", s.dataTypes ) < 0 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - @@ -90,7 +87,7 @@

    Quick search

    ©2022, dbt Labs. |
-    Powered by Sphinx 5.3.0
+    Powered by Sphinx 6.1.2
    & Alabaster 0.7.12

diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html
index d4238bb08c3..0c532473a06 100644
--- a/core/dbt/docs/build/html/index.html
+++ b/core/dbt/docs/build/html/index.html
@@ -10,9 +10,6 @@
-[three removed lines, likely the page-head script includes dropped along with jQuery; their text was lost in extraction]
@@ -35,758 +32,747 @@

dbt-core’s API documentation

How to invoke dbt commands in the Python runtime
Right now the best way to invoke a command from the Python runtime is to use the dbtRunner we expose. For example:
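A minimal sketch of that basic usage (assuming dbtRunner is importable from dbt.cli.main, as in the example file referenced below; the argument list and project path are illustrative placeholders, not defaults):

from dbt.cli.main import dbtRunner

# any arguments you could pass on the command line work here (hypothetical values)
cli_args = ["run", "--project-dir", "/path/to/jaffle_shop"]

# initialize the runner and invoke the command
dbt = dbtRunner()
res, success = dbt.invoke(cli_args)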


You can also pass pre-constructed objects into dbtRunner, and we will use those objects instead of loading them from disk. A sketch with the needed imports added (assuming load_profile and load_project live in dbt.config.runtime, as in the example file referenced below; the project path is a placeholder):

from dbt.cli.main import dbtRunner
from dbt.config.runtime import load_profile, load_project

project_dir = "/path/to/jaffle_shop"  # placeholder path
cli_args = ["run", "--project-dir", project_dir]

# preload profile and project
profile = load_profile(project_dir, {}, 'testing-postgres')
project = load_project(project_dir, False, profile, {})

# initialize the runner with the pre-loaded profile and project
dbt = dbtRunner(profile=profile, project=project)
# run the command; this uses the pre-loaded profile and project instead of loading them from disk
res, success = dbt.invoke(cli_args)

For the full example code, you can refer to core/dbt/cli/example.py.


API documentation

Command: build

defer
Type: boolean
If set, defer to the state variable for resolving unselected nodes.

exclude
Type: string
Specify the nodes to exclude.

fail_fast
Type: boolean
Stop execution on first failure.

full_refresh
Type: boolean
If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.

indirect_selection
Type: choice: [‘eager’, ‘cautious’]
Select all tests that are adjacent to selected resources, even if they were not explicitly selected.

-log_path
-Type: path
-Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

-models
-Type: string
-Specify the nodes to include.

profile
Type: string
Which profile to load. Overrides setting in dbt_project.yml.

profiles_dir
Type: path
Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

project_dir
Type: path
Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+select
+Type: string
+Specify the nodes to include.

selector
Type: string
The selector name to use, as defined in selectors.yml

show
Type: boolean
Show a sample of the loaded data in the terminal

state
Type: path
If set, use the given directory as the source for json files to compare with this project.

store_failures
Type: boolean
Store test results (failing rows) in the database

target
Type: string
Which target to load for the given profile

target_path
Type: path
Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

threads
Type: int
Specify number of threads to use while executing models. Overrides settings in profiles.yml.

vars
Type: YAML
Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, e.g. ‘{my_variable: my_value}’

version_check
Type: boolean
Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)
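Each flag above corresponds to a command-line option of the same name (underscores become hyphens). A sketch of exercising a few of them through dbtRunner; the selectors and values are illustrative, not defaults:

from dbt.cli.main import dbtRunner

# boolean flags are bare switches; valued flags take an argument
dbt = dbtRunner()
res, success = dbt.invoke([
    "build",
    "--select", "my_model+",              # nodes to include (illustrative selector)
    "--exclude", "tag:deprecated",        # nodes to exclude
    "--fail-fast",                        # stop execution on first failure
    "--vars", "{my_variable: my_value}",  # YAML string, as documented above
])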

    Command: clean¶

    +

    Command: clean¶

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    Command: compile¶

    +

    Command: compile¶

    -

    defer¶

    +

    defer¶

    Type: boolean

    If set, defer to the state variable for resolving unselected nodes.

    -

    exclude¶

    +

    exclude¶

    Type: string

    Specify the nodes to exclude.

    -

    full_refresh¶

    +

    full_refresh¶

    Type: boolean

If specified, dbt will drop incremental models and fully recalculate the incremental table from the model definition.

    -
    -

    log_path¶

    -

    Type: path

    -

    Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

    -
    -
    -

    models¶

    -

    Type: string

    -

    Specify the nodes to include.

    -
    -

    parse_only¶

    +

    parse_only¶

    Type: boolean

    TODO: No help text currently available

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    +
    +

    select¶

    +

    Type: string

    +

    Specify the nodes to include.

    +
    -

    selector¶

    +

    selector¶

    Type: string

    The selector name to use, as defined in selectors.yml

    -

    state¶

    +

    state¶

    Type: path

    If set, use the given directory as the source for json files to compare with this project.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    target_path¶

    +

    target_path¶

    Type: path

    Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

    -

    threads¶

    +

    threads¶

    Type: int

    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    version_check¶

    +

    version_check¶

    Type: boolean

    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

    -

    Command: debug¶

    +

    Command: debug¶

    -

    config_dir¶

    +

    config_dir¶

    Type: string

If specified, dbt will show path information for this project

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    version_check¶

    +

    version_check¶

    Type: boolean

    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

    -

    Command: deps¶

    +

    Command: deps¶

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    Command: docs¶

    -

    Command: init¶

    +

    Command: docs¶

    +

    Command: init¶

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    skip_profile_setup¶

    +

    skip_profile_setup¶

    Type: boolean

Skip interactive profile setup.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    Command: list¶

    +

    Command: list¶

    -

    exclude¶

    +

    exclude¶

    Type: string

    Specify the nodes to exclude.

    -

    indirect_selection¶

    +

    indirect_selection¶

    Type: choice: [‘eager’, ‘cautious’]

Select all tests that are adjacent to selected resources, even if those tests have not been explicitly selected.

    -
    -

    models¶

    -

    Type: string

    -

    Specify the nodes to include.

    -
    -

    output¶

    +

    output¶

    Type: choice: [‘json’, ‘name’, ‘path’, ‘selector’]

    TODO: No current help text

    -

    output_keys¶

    +

    output_keys¶

    Type: string

    TODO: No current help text

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    resource_type¶

    +

    resource_type¶

    Type: choice: [‘metric’, ‘source’, ‘analysis’, ‘model’, ‘test’, ‘exposure’, ‘snapshot’, ‘seed’, ‘default’, ‘all’]

    TODO: No current help text

    +
    +

    select¶

    +

    Type: string

    +

    Specify the nodes to include.

    +
    -

    selector¶

    +

    selector¶

    Type: string

    The selector name to use, as defined in selectors.yml

    -

    state¶

    +

    state¶

    Type: path

    If set, use the given directory as the source for json files to compare with this project.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’
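A sketch of list with machine-readable output, under the same assumptions as the build example earlier (a real project and profile on disk):

    from click.testing import CliRunner
    from dbt.cli.main import cli

    runner = CliRunner()
    # Emit the selected models as JSON rather than the default text output.
    result = runner.invoke(
        cli, ["list", "--resource-type", "model", "--output", "json"]
    )
    print(result.output)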

    -

    Command: parse¶

    +

    Command: parse¶

    -

    compile¶

    +

    compile¶

    Type: boolean

    TODO: No help text currently available

    -
    -

    log_path¶

    -

    Type: path

    -

    Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

    -
    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    target_path¶

    +

    target_path¶

    Type: path

    Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

    -

    threads¶

    +

    threads¶

    Type: int

    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    version_check¶

    +

    version_check¶

    Type: boolean

    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

    -

    write_manifest¶

    +

    write_manifest¶

    Type: boolean

    TODO: No help text currently available

    -

    Command: run¶

    +

    Command: run¶

    -

    defer¶

    +

    defer¶

    Type: boolean

    If set, defer to the state variable for resolving unselected nodes.

    -

    exclude¶

    +

    exclude¶

    Type: string

    Specify the nodes to exclude.

    -

    fail_fast¶

    +

    fail_fast¶

    Type: boolean

    Stop execution on first failure.

    -

    full_refresh¶

    +

    full_refresh¶

    Type: boolean

If specified, dbt will drop incremental models and fully recalculate the incremental table from the model definition.

    -
    -

    log_path¶

    -

    Type: path

    -

    Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

    -
    -
    -

    models¶

    -

    Type: string

    -

    Specify the nodes to include.

    -
    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    +
    +

    select¶

    +

    Type: string

    +

    Specify the nodes to include.

    +
    -

    selector¶

    +

    selector¶

    Type: string

    The selector name to use, as defined in selectors.yml

    -

    state¶

    +

    state¶

    Type: path

    If set, use the given directory as the source for json files to compare with this project.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    target_path¶

    +

    target_path¶

    Type: path

    Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

    -

    threads¶

    +

    threads¶

    Type: int

    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    version_check¶

    +

    version_check¶

    Type: boolean

    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

    -

    Command: run_operation¶

    +

    Command: run_operation¶

    -

    args¶

    +

    args¶

    Type: YAML

Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’
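Because args is parsed as YAML and mapped onto the macro's keyword arguments, the whole dictionary travels as one quoted string. A sketch (the macro name, its positional placement, and the argument are assumptions):

    from click.testing import CliRunner
    from dbt.cli.main import cli

    runner = CliRunner()
    # {role: reporter} becomes the keyword argument role="reporter" inside
    # the grant_select macro (hypothetical).
    result = runner.invoke(
        cli, ["run-operation", "grant_select", "--args", "{role: reporter}"]
    )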

    -

    Command: seed¶

    +

    Command: seed¶

    -

    exclude¶

    +

    exclude¶

    Type: string

    Specify the nodes to exclude.

    -

    full_refresh¶

    +

    full_refresh¶

    Type: boolean

If specified, dbt will drop incremental models and fully recalculate the incremental table from the model definition.

    -
    -

    log_path¶

    -

    Type: path

    -

    Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

    -
    -
    -

    models¶

    -

    Type: string

    -

    Specify the nodes to include.

    -
    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    +
    +

    select¶

    +

    Type: string

    +

    Specify the nodes to include.

    +
    -

    selector¶

    +

    selector¶

    Type: string

    The selector name to use, as defined in selectors.yml

    -

    show¶

    +

    show¶

    Type: boolean

    Show a sample of the loaded data in the terminal

    -

    state¶

    +

    state¶

    Type: path

    If set, use the given directory as the source for json files to compare with this project.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    target_path¶

    +

    target_path¶

    Type: path

    Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

    -

    threads¶

    +

    threads¶

    Type: int

    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    version_check¶

    +

    version_check¶

    Type: boolean

    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)
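A sketch combining the two seed-specific booleans above, under the same assumptions as the earlier examples:

    from click.testing import CliRunner
    from dbt.cli.main import cli

    runner = CliRunner()
    # Rebuild seed tables from scratch and print a sample of the loaded rows.
    result = runner.invoke(cli, ["seed", "--full-refresh", "--show"])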

    -

    Command: snapshot¶

    +

    Command: snapshot¶

    -

    defer¶

    +

    defer¶

    Type: boolean

    If set, defer to the state variable for resolving unselected nodes.

    -

    exclude¶

    +

    exclude¶

    Type: string

    Specify the nodes to exclude.

    -
    -

    models¶

    -

    Type: string

    -

    Specify the nodes to include.

    -
    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    +
    +

    select¶

    +

    Type: string

    +

    Specify the nodes to include.

    +
    -

    selector¶

    +

    selector¶

    Type: string

    The selector name to use, as defined in selectors.yml

    -

    state¶

    +

    state¶

    Type: path

    If set, use the given directory as the source for json files to compare with this project.

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    threads¶

    +

    threads¶

    Type: int

    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    Command: source¶

    -

    Command: test¶

    +

    Command: source¶

    +

    Command: test¶

    -

    defer¶

    +

    defer¶

    Type: boolean

    If set, defer to the state variable for resolving unselected nodes.

    -

    exclude¶

    +

    exclude¶

    Type: string

    Specify the nodes to exclude.

    -

    fail_fast¶

    +

    fail_fast¶

    Type: boolean

    Stop execution on first failure.

    -

    indirect_selection¶

    +

    indirect_selection¶

    Type: choice: [‘eager’, ‘cautious’]

Select all tests that are adjacent to selected resources, even if those tests have not been explicitly selected.

    -
    -

    log_path¶

    -

    Type: path

    -

    Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

    -
    -
    -

    models¶

    -

    Type: string

    -

    Specify the nodes to include.

    -
    -

    profile¶

    +

    profile¶

    Type: string

    Which profile to load. Overrides setting in dbt_project.yml.

    -

    profiles_dir¶

    +

    profiles_dir¶

    Type: path

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    -

    project_dir¶

    +

    project_dir¶

    Type: path

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    +
    +

    select¶

    +

    Type: string

    +

    Specify the nodes to include.

    +
    -

    selector¶

    +

    selector¶

    Type: string

    The selector name to use, as defined in selectors.yml

    -

    state¶

    +

    state¶

    Type: path

    If set, use the given directory as the source for json files to compare with this project.

    -

    store_failures¶

    +

    store_failures¶

    Type: boolean

    Store test results (failing rows) in the database

    -

    target¶

    +

    target¶

    Type: string

    Which target to load for the given profile

    -

    target_path¶

    +

    target_path¶

    Type: path

    Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

    -

    threads¶

    +

    threads¶

    Type: int

    Specify number of threads to use while executing models. Overrides settings in profiles.yml.

    -

    vars¶

    +

    vars¶

    Type: YAML

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. It should be a YAML string, e.g. ‘{my_variable: my_value}’

    -

    version_check¶

    +

    version_check¶

    Type: boolean

    Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)
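A sketch of the test-specific flags above, cautious indirect selection plus persisted failures (node name hypothetical, same project/profile assumptions as before):

    from click.testing import CliRunner
    from dbt.cli.main import cli

    runner = CliRunner()
    # Only run tests whose references are all selected, and write failing
    # rows to the database.
    result = runner.invoke(
        cli,
        ["test", "--select", "my_model",
         "--indirect-selection", "cautious",
         "--store-failures"],
    )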

    +
    @@ -840,7 +826,7 @@

    Quick search

©2022, dbt Labs.
|
-      Powered by Sphinx 5.3.0
+      Powered by Sphinx 6.1.2
      & Alabaster 0.7.12
|

diff --git a/core/dbt/docs/build/html/search.html b/core/dbt/docs/build/html/search.html
index f94c6ef0835..006a67abd9b 100644
--- a/core/dbt/docs/build/html/search.html
+++ b/core/dbt/docs/build/html/search.html
@@ -10,9 +10,6 @@
-
-
-
@@ -109,7 +106,7 @@

    Related Topics

©2022, dbt Labs.
|
-      Powered by Sphinx 5.3.0
+      Powered by Sphinx 6.1.2
      & Alabaster 0.7.12

diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js
index 25dd9fd3af5..0555d3c1848 100644
--- a/core/dbt/docs/build/html/searchindex.js
+++ b/core/dbt/docs/build/html/searchindex.js
@@ -1 +1 @@
-Search.setIndex({...})  [one-line generated search index omitted]
\ No newline at end of file
+Search.setIndex({...})  [one-line generated search index omitted; the new index additionally covers the "How to invoke dbt commands in python runtime" section and the new select entries]
\ No newline at end of file

diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py
index 425f7771995..d4d086acbce 100644
--- a/core/dbt/task/deps.py
+++ b/core/dbt/task/deps.py
@@ -25,7 +25,6 @@
 from dbt.task.base import BaseTask, move_to_nearest_project_dir
-
 from dbt.config import Project
@@ -91,4 +90,4 @@ def run(self) -> None:
             )
         if packages_to_upgrade:
             fire_event(EmptyLine())
-            fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))
\ No newline at end of file
+            fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))

From 53127daad83be7744c29ecadd9c5d69aa8273cde Mon Sep 17 00:00:00 2001
From: Kshitij Aranke
Date: Wed, 11 Jan 2023 15:01:50 -0800
Subject: [PATCH 17/54] [CT-921] `dbt compile` works in click (#5545) (#6586)

Co-authored-by: Github Build Bot
resolves https://github.com/dbt-labs/dbt-core/issues/5545
---
 .changes/unreleased/Under the Hood-20230111-145143.yaml |   6 ++++++
 core/dbt/cli/main.py                                    |   9 +++++++--
 .../docs/build/doctrees/environment.pickle              | Bin 182190 -> 182098 bytes
 core/dbt/docs/build/html/genindex.html                  |   2 +-
 core/dbt/docs/build/html/index.html                     |   2 +-
 core/dbt/docs/build/html/search.html                    |   2 +-
 6 files changed, 16 insertions(+), 5 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230111-145143.yaml

diff --git a/.changes/unreleased/Under the Hood-20230111-145143.yaml b/.changes/unreleased/Under the Hood-20230111-145143.yaml
new file mode 100644
index 00000000000..2f5c9c3fc1b
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230111-145143.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: '[CT-921] dbt compile works in click'
+time: 2023-01-11T14:51:43.324107-08:00
+custom:
+  Author: aranke
+  Issue: "5545"

diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index e47b6f58710..2d66733f37e 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -10,6 +10,7 @@
 from dbt.config.profile import Profile
 from dbt.contracts.graph.manifest import Manifest
 from dbt.task.clean import CleanTask
+from dbt.task.compile import CompileTask
 from dbt.task.deps import DepsTask
 from dbt.task.run import RunTask
 from dbt.task.test import TestTask
@@ -210,8 +211,12 @@ def docs_serve(ctx, **kwargs):
 def compile(ctx, **kwargs):
     """Generates executable SQL from source, model, test, and analysis files.
    Compiled SQL files are written to the target/ directory."""
-    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}")
-    return None, True
+    config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"])
+    task = CompileTask(ctx.obj["flags"], config)
+
+    results = task.run()
+    success = task.interpret_results(results)
+    return results, success


 # dbt debug
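The hunk above establishes the pattern every wired-up command in this series follows: build a RuntimeConfig from the project, profile, and flags placed on ctx.obj, hand it to a task, run it, and interpret the results. A standalone sketch of that pattern (the RuntimeConfig import path is assumed, and flags, project, and profile stand in for the objects the @requires decorators provide):

    from dbt.config import RuntimeConfig  # assumed import path
    from dbt.task.compile import CompileTask

    def run_compile_task(flags, project, profile):
        # Mirrors the click command body in the diff above.
        config = RuntimeConfig.from_parts(project, profile, flags)
        task = CompileTask(flags, config)
        results = task.run()
        return results, task.interpret_results(results)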

    Quick search

    ©2022, dbt Labs. | - Powered by Sphinx 6.1.2 + Powered by Sphinx 6.1.3 & Alabaster 0.7.12 diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html index 0c532473a06..3253006b935 100644 --- a/core/dbt/docs/build/html/index.html +++ b/core/dbt/docs/build/html/index.html @@ -826,7 +826,7 @@

    Quick search

©2022, dbt Labs.
|
-      Powered by Sphinx 6.1.2
+      Powered by Sphinx 6.1.3
      & Alabaster 0.7.12
|

diff --git a/core/dbt/docs/build/html/search.html b/core/dbt/docs/build/html/search.html
index 006a67abd9b..e7a0040c970 100644
--- a/core/dbt/docs/build/html/search.html
+++ b/core/dbt/docs/build/html/search.html
@@ -106,7 +106,7 @@

    Related Topics

©2022, dbt Labs.
|
-      Powered by Sphinx 6.1.2
+      Powered by Sphinx 6.1.3
      & Alabaster 0.7.12

From 1913eac5ed2721119ca161d61fbc6f771033640b Mon Sep 17 00:00:00 2001
From: Chenyu Li
Date: Wed, 18 Jan 2023 16:01:58 -0800
Subject: [PATCH 18/54] Click snapshot as click command (#5972) (#6640)

---
 .changes/unreleased/Under the Hood-20230117-162505.yaml |   6 ++++++
 core/dbt/cli/main.py                                    |  11 +++++++++--
 .../docs/build/doctrees/environment.pickle              | Bin 182098 -> 182098 bytes
 .../dbt/docs/build/html/_static/alabaster.css           |   4 +++-
 core/dbt/docs/build/html/genindex.html                  |   2 +-
 core/dbt/docs/build/html/index.html                     |   2 +-
 core/dbt/docs/build/html/search.html                    |   2 +-
 7 files changed, 21 insertions(+), 6 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230117-162505.yaml

diff --git a/.changes/unreleased/Under the Hood-20230117-162505.yaml b/.changes/unreleased/Under the Hood-20230117-162505.yaml
new file mode 100644
index 00000000000..5980bd2632a
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230117-162505.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: dbt snapshot works in click
+time: 2023-01-17T16:25:05.973769-08:00
+custom:
+  Author: ChenyuLInx
+  Issue: "5554"

diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 2d66733f37e..4ebcc4722cd 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -14,6 +14,7 @@
 from dbt.task.deps import DepsTask
 from dbt.task.run import RunTask
 from dbt.task.test import TestTask
+from dbt.task.snapshot import SnapshotTask


 # CLI invocation
@@ -401,10 +402,16 @@ def seed(ctx, **kwargs):
 @p.threads
 @p.vars
 @requires.preflight
+@requires.profile
+@requires.project
 def snapshot(ctx, **kwargs):
     """Execute snapshots defined in your project"""
-    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}")
-    return None, True
+    config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"])
+    task = SnapshotTask(ctx.obj["flags"], config)
+
+    results = task.run()
+    success = task.interpret_results(results)
+    return results, success


 # dbt source

diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index 6a2d234ffe5c0e955d6cd8258c3911cb82ac3cc8..9c9eb6d15e3b1372bfe01bb717b2eb7f74c37ba2 100644
GIT binary patch
[binary delta 29 omitted]

 Quick search

 |
       Powered by Sphinx 6.1.3
-      & Alabaster 0.7.12
+      & Alabaster 0.7.13

diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html
index 3253006b935..0c4438bc8ea 100644
--- a/core/dbt/docs/build/html/index.html
+++ b/core/dbt/docs/build/html/index.html
@@ -827,7 +827,7 @@

    Quick search

|
       Powered by Sphinx 6.1.3
-      & Alabaster 0.7.12
+      & Alabaster 0.7.13
|

 Related Topics

|
       Powered by Sphinx 6.1.3
-      & Alabaster 0.7.12
+      & Alabaster 0.7.13

From 30a1595f7265c8a09b982bc91671e6bf26fb647d Mon Sep 17 00:00:00 2001
From: Michelle Ark
Date: Thu, 19 Jan 2023 09:28:32 -0500
Subject: [PATCH 19/54] click working with list (#6641)

* list working with click
---
 .changes/unreleased/Under the Hood-20230117-213729.yaml |   6 +
 core/dbt/cli/main.py                                    |  29 +++-
 core/dbt/cli/option_types.py                            |  12 +-
 core/dbt/cli/options.py                                 |  44 ++++++
 core/dbt/cli/params.py                                  |  30 ++--
 .../docs/build/doctrees/environment.pickle              | Bin 182098 -> 203622 bytes
 core/dbt/docs/build/doctrees/index.doctree              | Bin 87716 -> 97763 bytes
 core/dbt/docs/build/html/index.html                     | 133 +++++++++++++++---
 core/dbt/docs/build/html/searchindex.js                 |   2 +-
 9 files changed, 223 insertions(+), 33 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230117-213729.yaml
 create mode 100644 core/dbt/cli/options.py

diff --git a/.changes/unreleased/Under the Hood-20230117-213729.yaml b/.changes/unreleased/Under the Hood-20230117-213729.yaml
new file mode 100644
index 00000000000..8500a0a70b7
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230117-213729.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: dbt list working with click
+time: 2023-01-17T21:37:29.91632-05:00
+custom:
+  Author: michelleark
+  Issue: "5549"

diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 4ebcc4722cd..80e0ed7e83c 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -15,15 +15,11 @@
 from dbt.task.run import RunTask
 from dbt.task.test import TestTask
 from dbt.task.snapshot import SnapshotTask
+from dbt.task.list import ListTask


 # CLI invocation
 def cli_runner():
-    # Alias "list" to "ls"
-    ls = copy(cli.commands["list"])
-    ls.hidden = True
-    cli.add_command(ls, "ls")
-
     # Run the cli
     cli()
@@ -155,6 +151,7 @@ def docs(ctx, **kwargs):
 @p.compile_docs
 @p.defer
 @p.exclude
+@p.models
 @p.profile
 @p.profiles_dir
 @p.project_dir
@@ -196,6 +193,7 @@ def docs_serve(ctx, **kwargs):
 @p.defer
 @p.exclude
 @p.full_refresh
+@p.models
 @p.parse_only
 @p.profile
 @p.profiles_dir
@@ -278,6 +276,7 @@ def init(ctx, **kwargs):
 @click.pass_context
 @p.exclude
 @p.indirect_selection
+@p.models
 @p.output
 @p.output_keys
 @p.profile
@@ -290,10 +289,21 @@ def init(ctx, **kwargs):
 @p.target
 @p.vars
 @requires.preflight
+@requires.profile
+@requires.project
 def list(ctx, **kwargs):
     """List the resources in your project"""
-    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}")
-    return None, True
+    config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"])
+    task = ListTask(ctx.obj["flags"], config)
+
+    results = task.run()
+    success = task.interpret_results(results)
+    return results, success
+
+
+ls = copy(cli.commands["list"])
+ls.hidden = True
+cli.add_command(ls, "ls")


 # dbt parse
@@ -323,6 +333,7 @@ def parse(ctx, **kwargs):
 @p.exclude
 @p.fail_fast
 @p.full_refresh
+@p.models
 @p.profile
 @p.profiles_dir
 @p.project_dir
@@ -368,6 +379,7 @@ def run_operation(ctx, **kwargs):
 @click.pass_context
 @p.exclude
 @p.full_refresh
+@p.models
 @p.profile
 @p.profiles_dir
 @p.project_dir
@@ -392,6 +404,7 @@ def seed(ctx, **kwargs):
 @click.pass_context
 @p.defer
 @p.exclude
+@p.models
 @p.profile
 @p.profiles_dir
 @p.project_dir
@@ -425,6 +438,7 @@ def source(ctx, **kwargs):
 @source.command("freshness")
 @click.pass_context
 @p.exclude
+@p.models
 @p.output_path  # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate?
 @p.profile
 @p.profiles_dir
@@ -449,6 +463,7 @@
 @p.exclude
 @p.fail_fast
 @p.indirect_selection
+@p.models
 @p.profile
 @p.profiles_dir
 @p.project_dir

diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py
index 1df8bef1f7a..e0294c2a096 100644
--- a/core/dbt/cli/option_types.py
+++ b/core/dbt/cli/option_types.py
@@ -1,4 +1,4 @@
-from click import ParamType
+from click import ParamType, Choice

 from dbt.config.utils import parse_cli_vars
 from dbt.exceptions import ValidationException
@@ -33,3 +33,13 @@ def convert(self, value, param, ctx):
             return None
         else:
             return value
+
+
+class ChoiceTuple(Choice):
+    name = "CHOICE_TUPLE"
+
+    def convert(self, value, param, ctx):
+        for value_item in value:
+            super().convert(value_item, param, ctx)
+
+        return value

diff --git a/core/dbt/cli/options.py b/core/dbt/cli/options.py
new file mode 100644
index 00000000000..4ac705dc140
--- /dev/null
+++ b/core/dbt/cli/options.py
@@ -0,0 +1,44 @@
+import click
+
+
+# Implementation from: https://stackoverflow.com/a/48394004
+# Note MultiOption options must be specified with type=tuple or type=ChoiceTuple (https://github.com/pallets/click/issues/2012)
+class MultiOption(click.Option):
+    def __init__(self, *args, **kwargs):
+        self.save_other_options = kwargs.pop("save_other_options", True)
+        nargs = kwargs.pop("nargs", -1)
+        assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs)
+        super(MultiOption, self).__init__(*args, **kwargs)
+        self._previous_parser_process = None
+        self._eat_all_parser = None
+
+    def add_to_parser(self, parser, ctx):
+        def parser_process(value, state):
+            # method to hook to the parser.process
+            done = False
+            value = [value]
+            if self.save_other_options:
+                # grab everything up to the next option
+                while state.rargs and not done:
+                    for prefix in self._eat_all_parser.prefixes:
+                        if state.rargs[0].startswith(prefix):
+                            done = True
+                    if not done:
+                        value.append(state.rargs.pop(0))
+            else:
+                # grab everything remaining
+                value += state.rargs
+                state.rargs[:] = []
+            value = tuple(value)
+            # call the actual process
+            self._previous_parser_process(value, state)
+
+        retval = super(MultiOption, self).add_to_parser(parser, ctx)
+        for name in self.opts:
+            our_parser = parser._long_opt.get(name) or parser._short_opt.get(name)
+            if our_parser:
+                self._eat_all_parser = our_parser
+                self._previous_parser_process = our_parser.process
+                our_parser.process = parser_process
+                break
+        return retval

diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
index 7795fb9d218..6173081ad75 100644
--- a/core/dbt/cli/params.py
+++ b/core/dbt/cli/params.py
@@ -1,7 +1,8 @@
 from pathlib import Path, PurePath

 import click
-from dbt.cli.option_types import YAML
+from dbt.cli.options import MultiOption
+from dbt.cli.option_types import YAML, ChoiceTuple
 from dbt.cli.resolvers import default_project_dir, default_profiles_dir
@@ -80,7 +81,9 @@
     hidden=True,
 )

-exclude = click.option("--exclude", envvar=None, help="Specify the nodes to exclude.")
+exclude = click.option(
+    "--exclude", envvar=None, type=tuple, cls=MultiOption, help="Specify the nodes to exclude."
+)

 fail_fast = click.option(
     "--fail-fast/--no-fail-fast",
@@ -133,13 +136,22 @@
     hidden=True,
 )

+models = click.option(
+    "--models",
+    "--model",
+    "-m",
+    envvar=None,
+    help="Specify the nodes to include.",
+    cls=MultiOption,
+    type=tuple,
+)

 output = click.option(
     "--output",
     envvar=None,
     help="TODO: No current help text",
     type=click.Choice(["json", "name", "path", "selector"], case_sensitive=False),
-    default="name",
+    default="selector",
 )

 output_keys = click.option(
@@ -233,10 +245,11 @@
 )

 resource_type = click.option(
+    "--resource-types",
     "--resource-type",
     envvar=None,
     help="TODO: No current help text",
-    type=click.Choice(
+    type=ChoiceTuple(
         [
             "metric",
             "source",
@@ -251,16 +264,17 @@
         ],
         case_sensitive=False,
     ),
-    default="default",
+    cls=MultiOption,
+    default=(),
 )

 select = click.option(
-    "-m",
     "-s",
-    "select",
+    "--select",
     envvar=None,
     help="Specify the nodes to include.",
-    multiple=True,
+    cls=MultiOption,
+    type=tuple,
 )

 selector = click.option(
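The MultiOption class added in options.py above is what lets --select and --exclude greedily consume space-separated values; per its own note, such options must be declared with type=tuple (or type=ChoiceTuple). A minimal usage sketch outside dbt (command and option names are illustrative only):

    import click
    from click.testing import CliRunner
    from dbt.cli.options import MultiOption

    @click.command()
    @click.option("--select", cls=MultiOption, type=tuple, default=())
    def show(select):
        # All bare arguments up to the next --option are folded into one tuple.
        click.echo(select)

    result = CliRunner().invoke(show, ["--select", "model_a", "model_b"])
    print(result.output)  # ('model_a', 'model_b')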
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index 9c9eb6d15e3b1372bfe01bb717b2eb7f74c37ba2..71d7f331d797d93ef8b33b254985764d5e25a709 100644
GIT binary patch
[binary delta 24146 omitted]

diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree
index 19f1fe1cd87981168f5032123ab54f51ffbbe80c..19135572dd5450c670ed3f357073dbd56da98f59 100644
GIT binary patch
[binary delta 7695 omitted]
z&^J9pR@WbWyjQ4_EOJ2N>N!rb$Wb}D-X>Y(RLk(%0E~i50?S(fdR;=Va;PMbJ7{N; zgV-d2_}S%%y*C(3e;8JGEdA7O7cQ~lN6f!OyKozh@GwLBct%W{iUmInr$*`6)2YNx zNAc&5^<}B#CZ%ia^-G^57h0!IwYO@OI|cvbROf8Xoy14u@lU~O-@L3pWy>pMyz0v3 z^H$_kuMksK6y(_WOMwf>*_xApp!&V(4 b6y(I`%)`3K&6e`jC5=ia&c*tL3>YwJ?i zs#bejf8>Vwr7`v55YmQ!uC?5-%m{`2CPfEO=4J6-fas zqYXB?TX4_*P>dRqBzW+>Av#_zU5QCaI{rDaJ>oD=+@2JVcW=fZ3{Bz#Cqh@8IaIeF z?iC97;xHY9lY{X6Vewd&6NPPt@5lVyN9zkWn)O>!zlP#(t3h}Cc?ocsK$Ir(HvR3Ux(0KnBAruddOX9V;5CH~U zFupgA{N@~%ju(OR!z<(Cv3{Bf`%Fl}pl`e4hZA(W>E$B8gA;W;Gsy>y>G2qt(F3#7 z$=ho^@O-+?-OO+a%O>dPV&VOab7rkas+I~&65}4ml@qFB3r|5T##Xqj@+p`sz$;(JU4?-Z)r2`d6oU?Yj|Ls^ zQDGPG!^Zqr?%_#Qcqu6ahZKm2u&y1@Y8-^Qg%+`FEL{gfP%AbQQKynf7)N5_WCg!d zYhajW}?Q zN^{j5mF8#WI$W)ptK;d{y-}Mdvgq)@eSQ~+!J~^}F}CBg{KMhkg^TH^pE!Ez)LXAh zr;?r#PQiHk7=s>%U9q8bJ!CiUHZ6!`v!205pGg=|s@#HA@@s=fYpgHTWQ)SO=A(@Z zG<;!^2~R9gu6A9hT!lqC?p^GU)BL zBkt`x3i~cm@%VU&ibvv79jCly#GOl3JYboM$INApc!a*CW1p4YY+grdALG-I^w`k_ z7U1TUbD@I%#~!biduap<-yf*ls3ESR&a5Bjf4 z!kfD?v2vA;`QA@3e6@~&6`{Cbbv*W{42D01Yz!>(!IwT>hVPW=7{rtCKjk|9>v$X7 zVb^d_1$Bl!3VDTe44%i-$`~wba77RPcifDGF8lrQF~-ZjN~u)thg3v9; ztqH|;Z>uhw`nDr4)$+5*i#u0&!5psR-Gh`3F0%orh{TMkYaFOkwT2wYeT25PLU9}1 zy}^LBYvaS7EwmNQC@IXeO)8$9Z>!91n_ii1Sn=0WG`y2GzaWji_YKO-o{^JD|4)dR zW-Bht$&82&ickW?$~xVeH@hgONUW#L$g<^$MX{}@Sp09Bl|Q4Xq|heTMboYMMcFgt zd6CVQCC;*JQ>-O<^2nN(7eR(7W7Z1I^INFCQo2Sdzf!t3I*4}40O!tWgmW(qac+b$ z&b8AZ=iX^l^PUWIt_=?BoO`m6mg%_kon)E%NAK#LJo9pjis$_$pqRXFfErZ?)~O74 zTQ5fyPT8Pp<<=1mk8eo zd*kS^ukp@i9sl>a0h6{Uc-gY$pv2Mh?+GnN>{YGWXjyd%uZ@8ad|d75qqbXh{CT@K zW^DDv`V;*i4jXqkup{ea2u6J%u_M8qu)|xqfkb;=X+ z(YiHG7WBPs-LdS3D+s!|U55p|{9GhHN05Y7K_h$ z;=KJTnrHSaP)hoTBbdH5Iu5#Ez~UM;kN#al=FlvD@qn7etd%B(T{@`Yn(qRchb9r{ z=67by`$Uqma19syVFpbE7&{-*aqgucoP9`wJ|4al!;&=Kq`_F;?*>;WxZk2ET5&_@t@`wn?ykF z5l(J7rDOG7KX%v?kKP@HqyKqOTrlz7Ivsca=!#Q*)$sQ^)wSbJtC~A?T6QhQ*6TRz zo)4DSt9HFxzhB-YhoZUOj17$%&V1laW4Q55oVZKcM}1`jDRkIF&E9Zk3d3W6_}NFD z?ac7^KaKXq=MFJE^~B3Q?!rLEBN&*l<~wxcKwr7QHbCD}Iu-P~$BtXYx%8qh*V^&(2CcbjsIuf(Z7#cXO%UrX>~KO~I-F@BYsg z+R9p`OJn1&1Xdoc>4du*AewbHG>5Fq9jP_YAhlZWS{N5AiZA>hx#jk4)Od8!L}NHECa>2p6Y zgAwP3B($$88`Dbu!$ToD8|3`PhlfEJ8%fLrS7O{%KNh+P@kovdOMOFd-=73>MobMm z$6i$i(BrBk9J;W@MtPfT{I6tL?odtd&?Qd@;*2<8?ku)lu`D<8!I9+UCnTtAdet$I zEJvEP@M{1TIAYIu6bS61;yGgE$GSljyDhDg1A{$~Zt@ftFv_M?4fhUe%7| zi6Zhcn8pBHol3tR6arUM+3`e?^H z4hCOOy586dTOdK5kBmQ!3ts)K64WRJ>2O6cHu|(|D7fo9<-vW)qR?HQ(25`JMAr#I za{j8j6v145MQiYAjqISc=#4O5ZlE_zgRd+23f?G)7ku%UUlPqHc!EL@e#R3N%5YmJ z^8Li~5X@@@8U0MagT0(&YrQD;gx~yvH&OK&071N;0ebRPVpV1kfh>*T?TqkmekTz+ zaMuJ1_dO%Yp(^LYeBhwCc=2*y;{It6nD{MUp(8C2I`Y&Yh@nE{^E<;E1n8%DxIYZy zeuJSif773+2+R33e}{@OCNhLxvb-XIM3F8({!0M4CZ72`F_7$)@BD>As(k44lprFE z?Mp^=PXbTAJ%~iogP*$utCHcfgDC{!${DD{U0Bz(YI+ z`@j#)2Sd3}n99NZ&Q4^5HoPE4#Gc@cPDNFCwLIawGpPL` zCvpjWpK{Pi3yg&!Ex+&$DfF|(I4P#$S_Ic3m6UWLMdB<>e@dV3CodE*?=MGLBcUs- zP?pdm4JyS~Gb>jj=(z@4p3;~5*{$Ag0DD>AJac+6u1c-)6{!0&vAr&md^58fk zjPaLj{S{q|ze4We&Mp5oo!VidWRs5wz9vrTv(oFD`smeo>LXeU_%2mkR)5|!hO)ME z5@=ASG>X#R(o=C(DbDy_#98G}5DxnkW0gN&XoX1l!eJ$qe4VoLq_mR%+M8HtO{fbu z6wgV2Q6gR7p)#>u7Wvf5B2dtl#={3H-nEKXv?D(-ka*XcF!`JRumg&9t%*nF(ZoJs zk~a~4JwdUZR<_blar}cp#C6(acYApuuv?1lw242NPIK%}sZrv1ovudefer

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    @@ -94,7 +94,7 @@

    project_dir

    select¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to include.

    @@ -176,7 +176,7 @@

    defer

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    @@ -184,6 +184,11 @@

    full_refresh

    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +

    parse_only¶

    Type: boolean

    @@ -206,7 +211,7 @@

    project_dir

    select¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to include.

    @@ -341,7 +346,7 @@

    vars¶

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    @@ -349,6 +354,11 @@

    indirect_selection¶

    Type: choice: [‘eager’, ‘cautious’]

    Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.

    +
    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +

    output¶

    Type: choice: [‘json’, ‘name’, ‘path’, ‘selector’]

    @@ -374,14 +384,85 @@

    project_dir

    -

    resource_type¶

    -

    Type: choice: [‘metric’, ‘source’, ‘analysis’, ‘model’, ‘test’, ‘exposure’, ‘snapshot’, ‘seed’, ‘default’, ‘all’]

    +
    +

    resource_types¶

    +

    Type: unknown

    TODO: No current help text

    select¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +
    +
    +

    selector¶

    +

    Type: string

    +

    The selector name to use, as defined in selectors.yml

    +
    +
    +

    state¶

    +

    Type: path

    +

    If set, use the given directory as the source for json files to compare with this project.

    +
    +
    +

    target¶

    Type: string

    +

    Which target to load for the given profile

    +
    +
    +

    vars¶

    +

    Type: YAML

    +

    Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

    +
    +

    Command: list¶

    +
    +

    exclude¶

    +

    Type: unknown

    +

    Specify the nodes to exclude.

    +
    +
    +

    indirect_selection¶

    +

    Type: choice: [‘eager’, ‘cautious’]

    +

    Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.

    +
    +
    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +
    +
    +

    output¶

    +

    Type: choice: [‘json’, ‘name’, ‘path’, ‘selector’]

    +

    TODO: No current help text

    +
    +
    +

    output_keys¶

    +

    Type: string

    +

    TODO: No current help text

    +
    +
    +

    profile¶

    +

    Type: string

    +

    Which profile to load. Overrides setting in dbt_project.yml.

    +
    +
    +

    profiles_dir¶

    +

    Type: path

    +

    Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

    +
    +
    +

    project_dir¶

    +

    Type: path

    +

    Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

    +
    +
    +

    resource_types¶

    +

    Type: unknown

    +

    TODO: No current help text

    +
    +
    +

    select¶

    +

    Type: unknown

    Specify the nodes to include.

    @@ -463,7 +544,7 @@

    defer

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    @@ -476,6 +557,11 @@

    full_refresh

    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +

    profile¶

    Type: string

    @@ -493,7 +579,7 @@

    project_dir

    select¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to include.

    @@ -565,7 +651,7 @@

    vars¶

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    @@ -573,6 +659,11 @@

    full_refresh

    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +

    profile¶

    Type: string

    @@ -590,7 +681,7 @@

    project_dir

    select¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to include.

    @@ -641,9 +732,14 @@

    defer

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    +
    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +

    profile¶

    Type: string

    @@ -661,7 +757,7 @@

    project_dir

    select¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to include.

    @@ -698,7 +794,7 @@

    defer

    exclude¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to exclude.

    @@ -711,6 +807,11 @@

    indirect_selection¶

    Type: choice: [‘eager’, ‘cautious’]

    Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.

    +
    +

    models¶

    +

    Type: unknown

    +

    Specify the nodes to include.

    +

    profile¶

    Type: string

    @@ -728,7 +829,7 @@

    project_dir

    select¶

    -

    Type: string

    +

    Type: unknown

    Specify the nodes to include.

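The regenerated reference above and the searchindex.js diff below both come from the API-docs page on invoking dbt commands from the Python runtime. A minimal sketch of the invocation pattern that page describes follows; the names dbtRunner, load_profile, load_project, and cli_args are taken from the indexed terms below, and their exact signatures are assumptions here, not verified API:

# Sketch only; signatures inferred from the indexed docs page, not verified.
from dbt.cli.main import dbtRunner
from dbt.config.runtime import load_profile, load_project

project_dir = "./jaffle_shop"  # hypothetical project path

# preload the profile and project from disk, as the docs page describes
profile = load_profile(project_dir, {}, "postgres")
project = load_project(project_dir, False, profile, {})

# initialize the runner with pre-constructed objects and invoke a command
dbt = dbtRunner(project=project, profile=profile)
results, success = dbt.invoke(["run", "--select", "my_model"])
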
    diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js index 0555d3c1848..3ed297346d9 100644 --- a/core/dbt/docs/build/html/searchindex.js +++ b/core/dbt/docs/build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"right": 0, "now": 0, "best": 0, "wai": 0, "from": 0, "i": 0, "us": 0, "dbtrunner": 0, "we": 0, "expos": 0, "you": 0, "can": 0, "also": 0, "pass": 0, "pre": 0, "construct": 0, "object": 0, "those": 0, "instead": 0, "load": 0, "up": 0, "disk": 0, "preload": 0, "project": 0, "load_profil": 0, "postgr": 0, "load_project": 0, "fals": 0, "initi": 0, "runner": 0, "thi": 0, "re": 0, "success": 0, "cli_arg": 0, "For": 0, "full": 0, "exampl": 0, "code": 0, "refer": 0, "cli": 0, "py": 0, "type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "string": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "model": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "have": 0, "been": 0, "explicitli": 0, "which": 0, "overrid": 0, "dbt_project": 0, "yml": 0, "path": 0, "directori": 0, "look": 0, "file": 0, "current": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "includ": 0, "The": 0, "name": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "configur": 0, "onli": 0, "appli": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "metric": 0, "analysi": 0, "exposur": 0, "macro": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "how": 0, "invok": 0, "command": 0, "python": 0, "runtim": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "resource_typ": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, 
"build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, 
"dbt-section"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"]], "output": [[0, "list|output"]], "output_keys": [[0, "list|output_keys"]], "resource_type": [[0, "list|resource_type"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file +Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"right": 0, "now": 0, "best": 0, "wai": 0, "from": 0, "i": 0, "us": 0, "dbtrunner": 0, "we": 0, "expos": 0, "you": 0, "can": 0, "also": 0, "pass": 0, "pre": 0, "construct": 0, "object": 0, "those": 0, "instead": 0, "load": 0, "up": 0, "disk": 0, "preload": 0, "project": 0, "load_profil": 0, "postgr": 0, "load_project": 0, "fals": 0, "initi": 0, "runner": 0, "thi": 0, "re": 0, "success": 0, "cli_arg": 0, "For": 0, "full": 0, "exampl": 0, "code": 0, "refer": 0, "cli": 0, "py": 0, "type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "unknown": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "have": 0, "been": 0, "explicitli": 0, "string": 0, "which": 0, "overrid": 0, "dbt_project": 0, "yml": 0, "path": 0, "directori": 0, "look": 0, "file": 0, "current": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "includ": 0, "The": 0, "name": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "configur": 0, "onli": 0, "appli": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "macro": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "how": 0, "invok": 0, "command": 0, "python": 0, "runtim": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "model": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "resource_typ": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, 
"sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "list|vars"], 
[0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "models": [[0, "compile|models"], [0, "list|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"], [0, "dbt-section"]], "output": [[0, "list|output"], [0, "list|output"]], "output_keys": [[0, "list|output_keys"], [0, "list|output_keys"]], "resource_types": [[0, "list|resource_types"], [0, "list|resource_types"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file From c85be323f537ff31991e618319bbf9763e16b65c Mon Sep 17 00:00:00 2001 From: Chenyu Li Date: Thu, 19 Jan 2023 08:16:58 -0800 Subject: [PATCH 20/54] fix MP_CONTEXT is not JSON serializable (#6650) --- core/dbt/utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/dbt/utils.py b/core/dbt/utils.py index 987371b6b02..370480ac11c 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -642,7 +642,10 @@ def args_to_dict(args): dict_args = {} # remove args keys that clutter up the dictionary for key in var_args: - if key == "cls": + if key.lower() in var_args and key == key.upper(): + # skip all capped keys being introduced by Flags in dbt.cli.flags + continue + if key in ["cls", "mp_context"]: continue if var_args[key] is None: continue From f0349488ed61c1bf38c610387712d54d6799a50f Mon Sep 17 00:00:00 2001 From: Chenyu Li Date: Thu, 19 Jan 2023 16:14:26 -0800 Subject: [PATCH 21/54] Seed and freshness works with click (#6651) --- core/dbt/cli/main.py | 33 +++++++++++++++--- core/dbt/cli/params.py | 2 +- .../docs/build/doctrees/environment.pickle | Bin 203622 -> 203622 bytes 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 80e0ed7e83c..3feb45b0539 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -15,7 +15,9 @@ from dbt.task.run import RunTask from dbt.task.test import TestTask from dbt.task.snapshot import SnapshotTask +from dbt.task.seed import SeedTask from dbt.task.list import ListTask +from dbt.task.freshness import FreshnessTask # CLI invocation @@ -301,6 +303,7 @@ def list(ctx, **kwargs): return results, success +# Alias "list" to "ls" ls = copy(cli.commands["list"]) ls.hidden = True cli.add_command(ls, "ls") @@ -393,10 +396,16 @@ def run_operation(ctx, **kwargs): @p.vars @p.version_check @requires.preflight +@requires.profile +@requires.project def seed(ctx, **kwargs): """Load data from csv files into your data warehouse.""" - 
click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") - return None, True + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = SeedTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt snapshot @@ -450,10 +459,22 @@ def source(ctx, **kwargs): @p.threads @p.vars @requires.preflight +@requires.profile +@requires.project def freshness(ctx, **kwargs): - """Snapshots the current freshness of the project's sources""" - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") - return None, True + """check the current freshness of the project's sources""" + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = FreshnessTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success + + +# Alias "source freshness" to "snapshot-freshness" +snapshot_freshness = copy(cli.commands["source"].commands["freshness"]) # type: ignore +snapshot_freshness.hidden = True +cli.commands["source"].add_command(snapshot_freshness, "snapshot-freshness") # type: ignore # dbt test @@ -477,6 +498,8 @@ def freshness(ctx, **kwargs): @p.vars @p.version_check @requires.preflight +@requires.profile +@requires.project def test(ctx, **kwargs): """Runs tests on data in deployed models. Run this after `dbt run`""" config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 6173081ad75..7356267eb9f 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -312,10 +312,10 @@ help="If set, use the given directory as the source for json files to compare with this project.", type=click.Path( dir_okay=True, - exists=True, file_okay=False, readable=True, resolve_path=True, + path_type=Path, ), ) diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle index 71d7f331d797d93ef8b33b254985764d5e25a709..a9c2d07f929d940b252cdfa5555e254aa053ff0c 100644 GIT binary patch delta 28 kcmaF1jpx}mo(;|NtYv!|D_fd7Hq)$ delta 28 kcmaF1jpx}mo(;|NtRbb^ma)wp^6ee+jN3cpneP1t0IyIB!~g&Q From 11c622230c9f3a0122427d307b379c2c5fd34bc9 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Fri, 20 Jan 2023 01:58:47 +0100 Subject: [PATCH 22/54] Add run-operation to click CLI (#5552) (#6656) * Add run-operation to click CLI * Add changelog entry * PR feedback * Fix unit test --- .../Under the Hood-20230119-105304.yaml | 6 ++++++ core/dbt/cli/main.py | 16 +++++++++++++--- core/dbt/task/run_operation.py | 12 +----------- tests/unit/test_cli.py | 3 ++- 4 files changed, 22 insertions(+), 15 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230119-105304.yaml diff --git a/.changes/unreleased/Under the Hood-20230119-105304.yaml b/.changes/unreleased/Under the Hood-20230119-105304.yaml new file mode 100644 index 00000000000..8d85fec68fc --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230119-105304.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Add dbt run-operation to click CLI +time: 2023-01-19T10:53:04.154871+01:00 +custom: + Author: jtcohen6 + Issue: "5552" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 3feb45b0539..2dd2dedc888 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -18,6 +18,7 @@ from dbt.task.seed import SeedTask from dbt.task.list import ListTask from dbt.task.freshness 
import FreshnessTask +from dbt.task.run_operation import RunOperationTask # CLI invocation @@ -260,6 +261,8 @@ def deps(ctx, **kwargs): # dbt init @cli.command("init") @click.pass_context +# for backwards compatibility, accept 'project_name' as an optional positional argument +@click.argument("project_name", required=False) @p.profile @p.profiles_dir @p.project_dir @@ -268,7 +271,7 @@ def deps(ctx, **kwargs): @p.vars @requires.preflight def init(ctx, **kwargs): - """Initialize a new DBT project.""" + """Initialize a new dbt project.""" click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") return None, True @@ -364,6 +367,7 @@ def run(ctx, **kwargs): # dbt run operation @cli.command("run-operation") @click.pass_context +@click.argument("macro") @p.args @p.profile @p.profiles_dir @@ -371,10 +375,16 @@ def run(ctx, **kwargs): @p.target @p.vars @requires.preflight +@requires.profile +@requires.project def run_operation(ctx, **kwargs): """Run the named macro with any supplied arguments.""" - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") - return None, True + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = RunOperationTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt seed diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py index b9d3115482e..9d7a469efd8 100644 --- a/core/dbt/task/run_operation.py +++ b/core/dbt/task/run_operation.py @@ -1,5 +1,4 @@ from datetime import datetime -from typing import Dict, Any import traceback import agate @@ -8,7 +7,6 @@ import dbt.exceptions from dbt.adapters.factory import get_adapter -from dbt.config.utils import parse_cli_vars from dbt.contracts.results import RunOperationResultsArtifact from dbt.exceptions import InternalException from dbt.events.functions import fire_event @@ -29,14 +27,6 @@ def _get_macro_parts(self): return package_name, macro_name - def _get_kwargs(self) -> Dict[str, Any]: - # N.B. parse_cli_vars is embedded into the param when using click. 
- # replace this with: - # return self.args.args - # when this task is refactored for click - # or remove the function completely as it's basically a noop - return parse_cli_vars(self.args.args) - def compile_manifest(self) -> None: if self.manifest is None: raise InternalException("manifest was None in compile_manifest") @@ -45,7 +35,7 @@ def _run_unsafe(self) -> agate.Table: adapter = get_adapter(self.config) package_name, macro_name = self._get_macro_parts() - macro_kwargs = self._get_kwargs() + macro_kwargs = self.args.args with adapter.connection_named("macro_{}".format(macro_name)): adapter.clear_transaction() diff --git a/tests/unit/test_cli.py b/tests/unit/test_cli.py index 07dda952a9d..dca21fdf6de 100644 --- a/tests/unit/test_cli.py +++ b/tests/unit/test_cli.py @@ -23,7 +23,8 @@ def run_test(commands): def test_unhidden_params_have_help_texts(self): def run_test(command): for param in command.params: - if not param.hidden: + # arguments can't have help text + if not isinstance(param, click.Argument) and not param.hidden: assert param.help is not None if type(command) is click.Group: for command in command.commands.values(): From 9823a56e1d3169faa2753346141ec47e2878557b Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 20 Jan 2023 14:27:39 -0500 Subject: [PATCH 23/54] dbt build works in click (#6680) * build working with click --- .../unreleased/Under the Hood-20230119-205650.yaml | 6 ++++++ core/dbt/cli/main.py | 12 ++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230119-205650.yaml diff --git a/.changes/unreleased/Under the Hood-20230119-205650.yaml b/.changes/unreleased/Under the Hood-20230119-205650.yaml new file mode 100644 index 00000000000..4121d4a3118 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230119-205650.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: dbt build working with new click framework +time: 2023-01-19T20:56:50.50549-05:00 +custom: + Author: michelleark + Issue: "5541" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 2dd2dedc888..7fc1455046d 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -19,6 +19,7 @@ from dbt.task.list import ListTask from dbt.task.freshness import FreshnessTask from dbt.task.run_operation import RunOperationTask +from dbt.task.build import BuildTask # CLI invocation @@ -104,6 +105,7 @@ def cli(ctx, **kwargs): @p.profile @p.profiles_dir @p.project_dir +@p.resource_type @p.select @p.selector @p.show @@ -115,10 +117,16 @@ def cli(ctx, **kwargs): @p.vars @p.version_check @requires.preflight +@requires.profile +@requires.project def build(ctx, **kwargs): """Run all Seeds, Models, Snapshots, and tests in DAG order""" - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") - return None, True + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = BuildTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt clean From a0ade13f5ad0dd146056e77469882c5bb57212ee Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 20 Jan 2023 15:26:20 -0500 Subject: [PATCH 24/54] dbt docs generate works with click (#6681) --- .../unreleased/Under the Hood-20230119-211040.yaml | 6 ++++++ core/dbt/cli/main.py | 11 +++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230119-211040.yaml diff --git a/.changes/unreleased/Under the 
Hood-20230119-211040.yaml b/.changes/unreleased/Under the Hood-20230119-211040.yaml new file mode 100644 index 00000000000..2968fa6b6dc --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230119-211040.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: dbt docs generate works with new click framework +time: 2023-01-19T21:10:40.698851-05:00 +custom: + Author: michelleark + Issue: "5543" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 7fc1455046d..57dc01f4aa6 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -20,6 +20,7 @@ from dbt.task.freshness import FreshnessTask from dbt.task.run_operation import RunOperationTask from dbt.task.build import BuildTask +from dbt.task.generate import GenerateTask # CLI invocation @@ -175,10 +176,16 @@ def docs(ctx, **kwargs): @p.vars @p.version_check @requires.preflight +@requires.profile +@requires.project def docs_generate(ctx, **kwargs): """Generate the documentation website for your project""" - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") - return None, True + config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) + task = GenerateTask(ctx.obj["flags"], config) + + results = task.run() + success = task.interpret_results(results) + return results, success # dbt docs serve From 92b7166c10b6857097401e1bbd9a84bc36dcd9a4 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 24 Jan 2023 11:05:57 -0600 Subject: [PATCH 25/54] Abstract manifest generation from tasks (#6565) --- .../Under the Hood-20230110-115725.yaml | 6 ++ core/dbt/cli/main.py | 97 ++++++++++++----- core/dbt/cli/requires.py | 71 +++++++++++- .../docs/build/doctrees/environment.pickle | Bin 203622 -> 205777 bytes core/dbt/docs/build/doctrees/index.doctree | Bin 97763 -> 98382 bytes .../docs/build/html/_sources/index.rst.txt | 1 + core/dbt/docs/build/html/index.html | 22 ++++ core/dbt/docs/build/html/searchindex.js | 2 +- core/dbt/docs/source/_ext/dbt_click.py | 4 +- core/dbt/docs/source/index.rst | 1 + core/dbt/main.py | 5 +- core/dbt/parser/manifest.py | 21 +++- core/dbt/task/base.py | 34 ++++-- core/dbt/task/compile.py | 3 +- core/dbt/task/generate.py | 11 +- core/dbt/task/list.py | 8 +- core/dbt/task/parse.py | 102 ------------------ core/dbt/task/run.py | 4 +- core/dbt/task/run_operation.py | 11 +- core/dbt/task/runnable.py | 57 ++-------- test/unit/test_config.py | 19 ++-- 21 files changed, 260 insertions(+), 219 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230110-115725.yaml delete mode 100644 core/dbt/task/parse.py diff --git a/.changes/unreleased/Under the Hood-20230110-115725.yaml b/.changes/unreleased/Under the Hood-20230110-115725.yaml new file mode 100644 index 00000000000..81dfefdba91 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230110-115725.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Abstract manifest generation +time: 2023-01-10T11:57:25.193965-06:00 +custom: + Author: stu-k + Issue: "6357" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 57dc01f4aa6..1adc432daf2 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -5,7 +5,6 @@ import click from dbt.cli import requires, params as p -from dbt.config import RuntimeConfig from dbt.config.project import Project from dbt.config.profile import Profile from dbt.contracts.graph.manifest import Manifest @@ -120,10 +119,15 @@ def cli(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def build(ctx, 
**kwargs): """Run all Seeds, Models, Snapshots, and tests in DAG order""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = BuildTask(ctx.obj["flags"], config) + task = BuildTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -166,7 +170,6 @@ def docs(ctx, **kwargs): @p.models @p.profile @p.profiles_dir -@p.project_dir @p.select @p.selector @p.state @@ -178,10 +181,11 @@ def docs(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def docs_generate(ctx, **kwargs): """Generate the documentation website for your project""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = GenerateTask(ctx.obj["flags"], config) + task = GenerateTask(ctx.obj["flags"], ctx.obj["runtime_config"]) results = task.run() success = task.interpret_results(results) @@ -225,11 +229,18 @@ def docs_serve(ctx, **kwargs): @p.vars @p.version_check @requires.preflight +@requires.profile +@requires.project +@requires.runtime_config +@requires.manifest def compile(ctx, **kwargs): """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the target/ directory.""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = CompileTask(ctx.obj["flags"], config) + task = CompileTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -267,7 +278,6 @@ def debug(ctx, **kwargs): def deps(ctx, **kwargs): """Pull the most recent version of the dependencies listed in packages.yml""" task = DepsTask(ctx.obj["flags"], ctx.obj["project"]) - results = task.run() success = task.interpret_results(results) return results, success @@ -311,10 +321,15 @@ def init(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def list(ctx, **kwargs): """List the resources in your project""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = ListTask(ctx.obj["flags"], config) + task = ListTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -341,9 +356,13 @@ def list(ctx, **kwargs): @p.version_check @p.write_manifest @requires.preflight +@requires.profile +@requires.project +@requires.runtime_config +@requires.manifest(write_perf_info=True) def parse(ctx, **kwargs): """Parses the project and provides information on performance""" - click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}") + # manifest generation and writing happens in @requires.manifest return None, True @@ -369,10 +388,15 @@ def parse(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def run(ctx, **kwargs): """Compile SQL and execute against the current target database.""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = RunTask(ctx.obj["flags"], config) + task = RunTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -392,10 +416,15 @@ def run(ctx, **kwargs): @requires.preflight @requires.profile 
@requires.project +@requires.runtime_config +@requires.manifest def run_operation(ctx, **kwargs): """Run the named macro with any supplied arguments.""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = RunOperationTask(ctx.obj["flags"], config) + task = RunOperationTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -423,10 +452,15 @@ def run_operation(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def seed(ctx, **kwargs): """Load data from csv files into your data warehouse.""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = SeedTask(ctx.obj["flags"], config) + task = SeedTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -451,10 +485,15 @@ def seed(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def snapshot(ctx, **kwargs): """Execute snapshots defined in your project""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = SnapshotTask(ctx.obj["flags"], config) + task = SnapshotTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -486,10 +525,15 @@ def source(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def freshness(ctx, **kwargs): """check the current freshness of the project's sources""" - config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"]) - task = FreshnessTask(ctx.obj["flags"], config) + task = FreshnessTask( + ctx.obj["flags"], + ctx.obj["runtime_config"], + ctx.obj["manifest"], + ) results = task.run() success = task.interpret_results(results) @@ -525,10 +569,15 @@ def freshness(ctx, **kwargs): @requires.preflight @requires.profile @requires.project +@requires.runtime_config +@requires.manifest def test(ctx, **kwargs): """Runs tests on data in deployed models. 
     Run this after `dbt run`"""
-    config = RuntimeConfig.from_parts(ctx.obj["project"], ctx.obj["profile"], ctx.obj["flags"])
-    task = TestTask(ctx.obj["flags"], config)
+    task = TestTask(
+        ctx.obj["flags"],
+        ctx.obj["runtime_config"],
+        ctx.obj["manifest"],
+    )
 
     results = task.run()
     success = task.interpret_results(results)
diff --git a/core/dbt/cli/requires.py b/core/dbt/cli/requires.py
index 690c12bfa10..74ec80c986a 100644
--- a/core/dbt/cli/requires.py
+++ b/core/dbt/cli/requires.py
@@ -1,8 +1,10 @@
-from dbt.adapters.factory import adapter_management
+from dbt.adapters.factory import adapter_management, register_adapter
 from dbt.cli.flags import Flags
+from dbt.config import RuntimeConfig
 from dbt.config.runtime import load_project, load_profile
 from dbt.events.functions import setup_event_logger
 from dbt.exceptions import DbtProjectError
+from dbt.parser.manifest import ManifestLoader, write_manifest
 from dbt.profiler import profiler
 from dbt.tracking import initialize_from_flags, track_run
 
@@ -85,3 +87,70 @@ def wrapper(*args, **kwargs):
             return func(*args, **kwargs)
 
     return update_wrapper(wrapper, func)
+
+
+def runtime_config(func):
+    """A decorator used by click command functions for generating a runtime
+    config given a profile and project.
+    """
+
+    def wrapper(*args, **kwargs):
+        ctx = args[0]
+        assert isinstance(ctx, Context)
+
+        req_strs = ["profile", "project"]
+        reqs = [ctx.obj.get(req_str) for req_str in req_strs]
+
+        if None in reqs:
+            raise DbtProjectError("profile and project required for runtime_config")
+
+        ctx.obj["runtime_config"] = RuntimeConfig.from_parts(
+            ctx.obj["project"],
+            ctx.obj["profile"],
+            ctx.obj["flags"],
+        )
+
+        return func(*args, **kwargs)
+
+    return update_wrapper(wrapper, func)
+
+
+def manifest(*args0, write_perf_info=False):
+    """A decorator used by click command functions for generating a manifest
+    given a profile, project, and runtime config. This also registers the adapter
+    from the runtime config and writes the manifest to disk.
+    """
+
+    def outer_wrapper(func):
+        def wrapper(*args, **kwargs):
+            ctx = args[0]
+            assert isinstance(ctx, Context)
+
+            req_strs = ["profile", "project", "runtime_config"]
+            reqs = [ctx.obj.get(dep) for dep in req_strs]
+
+            if None in reqs:
+                raise DbtProjectError("profile, project, and runtime_config required for manifest")
+
+            runtime_config = ctx.obj["runtime_config"]
+            register_adapter(runtime_config)
+
+            # if a manifest has already been set on the context, don't overwrite it
+            if ctx.obj.get("manifest") is None:
+                manifest = ManifestLoader.get_full_manifest(
+                    runtime_config, write_perf_info=write_perf_info
+                )
+
+                ctx.obj["manifest"] = manifest
+                if ctx.obj["flags"].write_json:
+                    write_manifest(manifest, ctx.obj["runtime_config"].target_path)
+
+            return func(*args, **kwargs)
+
+        return update_wrapper(wrapper, func)
+
+    # if there are no args, the decorator was used without params @decorator
+    # otherwise, the decorator was called with params @decorator(arg)
+    if len(args0) == 0:
+        return outer_wrapper
+    return outer_wrapper(args0[0])
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index 71d7f331d797d93ef8b33b254985764d5e25a709..a9c2d07f929d940b252cdfa5555e254aa053ff0c 100644
GIT binary patch
literal 205777
[binary patch data omitted]
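With PATCH 25/54 applied, every command in core/dbt/cli/main.py follows the same shape: the requires decorators populate ctx.obj, and the command body hands those objects to a task. Below is a minimal sketch of a hypothetical extra command built on that chain; the command name and option list are illustrative, not part of this series:

# Hypothetical example only: a new command wired up the same way as
# build/run/test above. Everything it consumes is placed on ctx.obj by
# the requires decorators from the requires.py diff.
import click

from dbt.cli import requires, params as p
from dbt.task.compile import CompileTask


@click.command("my-compile")  # illustrative name, not a real dbt command
@click.pass_context
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def my_compile(ctx, **kwargs):
    """Compile the project using the objects the decorators set up."""
    task = CompileTask(
        ctx.obj["flags"],
        ctx.obj["runtime_config"],
        ctx.obj["manifest"],
    )
    results = task.run()
    return results, task.interpret_results(results)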
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index a9c2d07f929d940b252cdfa5555e254aa053ff0c..3c70ad3fe4e42789208e299ae1de237e46920ac0 100644
GIT binary patch
literal 205777
[base85-encoded binary delta omitted: regenerated Sphinx doctree cache]

literal 203622
[base85-encoded binary delta omitted: previous doctree contents]

    dbt-core’s API documentation

    How to invoke dbt commands in python runtime¶

    Right now the best way to invoke a command from the Python runtime is to use the dbtRunner we exposed

    +from dbt.cli.main import dbtRunner
    +cli_args = ['run', '--project-dir', 'jaffle_shop']
    +
    +# initialize the dbt runner
    +dbt = dbtRunner()
    +# run the command
    +res, success = dbt.invoke(cli_args)

    You can also pass pre-constructed objects into dbtRunner, and we will use those objects instead of loading them up from disk.

    # preload profile and project
     profile = load_profile(project_dir, {}, 'testing-postgres')
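    The hunk truncates the example here. Judging from the identifiers indexed in the regenerated search index further down (load_project, initi, runner), the snippet plausibly continues along these lines; the exact load_project signature and dbtRunner keyword arguments are assumptions:

     project = load_project(project_dir, False, profile, {})

     # initialize the runner with the pre-loaded profile and project so that
     # dbt.invoke() uses them instead of reading from disk
     dbt = dbtRunner(profile=profile, project=project)
     res, success = dbt.invoke(cli_args)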
    @@ -92,6 +101,11 @@
    project_dir¶

    +resource_types¶

    +Type: unknown

    +TODO: No current help text

    select¶

    Type: unknown
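    The new resource_types parameter added above surfaces on the list command; a minimal sketch of exercising it through dbtRunner, assuming the flag keeps the existing dbt CLI spelling (singular --resource-type, repeatable):

    res, success = dbt.invoke(['list', '--resource-type', 'model'])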

    @@ -313,6 +327,10 @@
    vars¶

    Command: init¶

    +project_name¶

    +Type: string

    profile¶

    Type: string

    @@ -618,6 +636,10 @@
    version_check¶

    +macro¶

    +Type: string

    args¶

    Type: YAML
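    Together, the two run-operation parameters above map onto an invocation like the sketch below; the macro name grant_select is made up for illustration, and the call shape mirrors the dbtRunner example at the top of the page.

    # hypothetical run-operation invocation exercising macro (string) and args (YAML)
    from dbt.cli.main import dbtRunner

    dbt = dbtRunner()
    res, success = dbt.invoke(['run-operation', 'grant_select', '--args', '{role: reporter}'])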

    diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js
index 3ed297346d9..36036732601 100644
--- a/core/dbt/docs/build/html/searchindex.js
+++ b/core/dbt/docs/build/html/searchindex.js
@@ -1 +1 @@
[single-line minified Search.setIndex(...) blobs omitted: the regenerated index adds entries for the new project_name (init), macro (run-operation), and resource_types (build) parameters and reorders the indexed terms]
diff --git a/core/dbt/docs/source/_ext/dbt_click.py b/core/dbt/docs/source/_ext/dbt_click.py
index 1431ca91e41..7343cc6a110 100644
--- a/core/dbt/docs/source/_ext/dbt_click.py
+++ b/core/dbt/docs/source/_ext/dbt_click.py
@@ -44,7 +44,9 @@ def format_params(cmd) -> t.List[nodes.section]:
         type_str = get_type_str(param.type)
         param_section.append(nodes.paragraph(text=f"Type: {type_str}"))
 
-        param_section.append(nodes.paragraph(text=param.help))
+        help_txt = getattr(param, "help", None)
+        if help_txt is not None:
+            param_section.append(nodes.paragraph(text=help_txt))
 
         lines.append(param_section)
     return lines
diff --git a/core/dbt/docs/source/index.rst b/core/dbt/docs/source/index.rst
index 93d34a648f2..dcd1c82499f 100644
--- a/core/dbt/docs/source/index.rst
+++ b/core/dbt/docs/source/index.rst
@@ -6,6 +6,7 @@ How to invoke dbt commands in python runtime
 
 Right now the best way to invoke a command from the Python runtime is to use the `dbtRunner` we exposed
 
diff --git a/core/dbt/main.py b/core/dbt/main.py
index 3c23cfec4b3..d651c073765 100644
--- a/core/dbt/main.py
+++ b/core/dbt/main.py
@@ -30,7 +30,6 @@
 import dbt.task.generate as generate_task
 import dbt.task.init as init_task
 import dbt.task.list as list_task
-import dbt.task.parse as parse_task
 import dbt.task.run as run_task
 import dbt.task.run_operation as run_operation_task
 import dbt.task.seed as seed_task
@@ -541,7 +540,9 @@ def _build_parse_subparser(subparsers, base_subparser):
         Parses the project and provides information on performance
         """,
     )
-    sub.set_defaults(cls=parse_task.ParseTask, which="parse", rpc_method="parse")
+    # NOTE: changing this cls to None is breaking, but this file should go
+    # away once merging the click work
+    sub.set_defaults(cls=None, which="parse", rpc_method="parse")
    sub.add_argument("--write-manifest", action="store_true")
    sub.add_argument("--compile", action="store_true")
    return sub
diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py
index 787b70cfeaf..48eb9fca9c5 100644
--- a/core/dbt/parser/manifest.py
+++ b/core/dbt/parser/manifest.py
@@ -7,9 +7,11 @@
 from typing import Dict, Optional, Mapping, Callable, Any, List, Type, Union, Tuple
 from itertools import chain
 import time
+import json
 
 import dbt.exceptions
 import dbt.tracking
+import dbt.utils
 import dbt.flags as flags
 
 from dbt.adapters.factory import (
@@ -20,6 +22,7 @@
 from dbt.helper_types import PathSet
 from dbt.events.functions import fire_event, get_invocation_id, warn_or_error
 from dbt.events.types import (
+    ParseCmdPerfInfoPath,
     PartialParsingFullReparseBecauseOfError,
     PartialParsingExceptionFile,
     PartialParsingFile,
@@ -44,7 +47,7 @@
 from dbt.node_types import NodeType
 from dbt.clients.jinja import get_rendered, MacroStack
 from dbt.clients.jinja_static import statically_extract_macro_calls
-from dbt.clients.system import make_directory
+from dbt.clients.system import make_directory, write_file
 from dbt.config import Project, RuntimeConfig
 from dbt.context.docs import generate_runtime_docs_context
 from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace
@@ -89,8 +92,10 @@
 from dbt.dataclass_schema import StrEnum, dbtClassMixin
 
+MANIFEST_FILE_NAME = "manifest.json"
 PARTIAL_PARSE_FILE_NAME = "partial_parse.msgpack"
 PARSING_STATE = DbtProcessState("parsing")
+PERF_INFO_FILE_NAME = "perf_info.json"
 
 
 class ReparseReason(StrEnum):
@@ -193,6 +198,7 @@ def get_full_manifest(
         config: RuntimeConfig,
         *,
         reset: bool = False,
+        write_perf_info=False,
     ) -> Manifest:
 
         adapter = get_adapter(config)  # type: ignore
@@ -223,6 +229,9 @@ def get_full_manifest(
                 loader._perf_info.load_all_elapsed = time.perf_counter() - start_load_all
 
         loader.track_project_load()
 
+        if write_perf_info:
+            loader.write_perf_info(config.target_path)
+
         return manifest
 
     # This is where the main action happens
@@ -954,6 +963,11 @@ def process_nodes(self):
 
         self.manifest.rebuild_ref_lookup()
 
+    def write_perf_info(self, target_path: str):
+        path = os.path.join(target_path, PERF_INFO_FILE_NAME)
+        write_file(path, json.dumps(self._perf_info, cls=dbt.utils.JSONEncoder, indent=4))
+        fire_event(ParseCmdPerfInfoPath(path=path))
+
 
 def invalid_target_fail_unless_test(
     node,
@@ -1378,3 +1392,8 @@ def process_node(config: RuntimeConfig, manifest: Manifest, node: ManifestNode):
     _process_refs_for_node(manifest, config.project_name, node)
     ctx = generate_runtime_docs_context(config, node, manifest, config.project_name)
     _process_docs_for_node(ctx, node)
+
+
+def write_manifest(manifest: Manifest, target_path: str):
+    path = os.path.join(target_path, MANIFEST_FILE_NAME)
+    manifest.write(path)
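The new `write_perf_info` hunk leans on `dbt.utils.JSONEncoder` so that `json.dumps` can serialize the dataclass-based perf-info object. A standalone illustration of that encoder pattern — the `PerfInfoEncoder` and `LoadTiming` names here are hypothetical stand-ins, not dbt's implementation:

    import json
    from dataclasses import asdict, dataclass, is_dataclass

    class PerfInfoEncoder(json.JSONEncoder):  # hypothetical stand-in for dbt.utils.JSONEncoder
        def default(self, obj):
            # json.dumps calls default() only for objects it cannot serialize natively
            if is_dataclass(obj):
                return asdict(obj)
            return super().default(obj)

    @dataclass
    class LoadTiming:
        load_all_elapsed: float

    print(json.dumps(LoadTiming(1.25), cls=PerfInfoEncoder, indent=4))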
diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py
index f6d37937e99..1a948b48b2e 100644
--- a/core/dbt/task/base.py
+++ b/core/dbt/task/base.py
@@ -45,10 +45,11 @@
 from dbt.events.contextvars import get_node_info
 from .printer import print_run_result_error
 
-from dbt.adapters.factory import register_adapter
+from dbt.adapters.factory import get_adapter
 from dbt.config import RuntimeConfig, Project
 from dbt.config.profile import read_profile
 import dbt.exceptions
+from dbt.graph import Graph
 
 
 class NoneConfig:
@@ -96,7 +97,7 @@ def set_log_format(cls):
             log_manager.format_text()
 
     @classmethod
-    def from_args(cls, args):
+    def from_args(cls, args, *pargs, **kwargs):
         try:
             # This is usually RuntimeConfig
             config = cls.ConfigType.from_args(args)
@@ -123,7 +124,7 @@ def from_args(cls, args):
             tracking.track_invalid_invocation(args=args, result_type=exc.result_type)
             raise dbt.exceptions.RuntimeException("Could not run dbt") from exc
 
-        return cls(args, config)
+        return cls(args, config, *pargs, **kwargs)
 
     @abstractmethod
     def run(self):
@@ -167,17 +168,36 @@ def move_to_nearest_project_dir(project_dir: Optional[str]) -> str:
     return nearest_project_dir
 
 
+# TODO: look into deprecating this class in favor of several small functions that
+# produce the same behavior. currently this class only contains manifest compilation,
+# holding a manifest, and moving directories.
 class ConfiguredTask(BaseTask):
     ConfigType = RuntimeConfig
 
-    def __init__(self, args, config):
+    def __init__(self, args, config, manifest: Optional[Manifest] = None):
         super().__init__(args, config)
-        register_adapter(self.config)
+        self.graph: Optional[Graph] = None
+        self.manifest = manifest
+
+    def compile_manifest(self):
+        if self.manifest is None:
+            raise InternalException("compile_manifest called before manifest was loaded")
+
+        start_compile_manifest = time.perf_counter()
+
+        # we cannot get adapter in init since it will break rpc #5579
+        adapter = get_adapter(self.config)
+        compiler = adapter.get_compiler()
+        self.graph = compiler.compile(self.manifest)
+
+        compile_time = time.perf_counter() - start_compile_manifest
+        if dbt.tracking.active_user is not None:
+            dbt.tracking.track_runnable_timing({"graph_compilation_elapsed": compile_time})
 
     @classmethod
-    def from_args(cls, args):
+    def from_args(cls, args, *pargs, **kwargs):
         move_to_nearest_project_dir(args.project_dir)
-        return super().from_args(args)
+        return super().from_args(args, *pargs, **kwargs)
 
 
 class ExecutionContext:
diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py
index 740d35d37e9..92eedcd97d3 100644
--- a/core/dbt/task/compile.py
+++ b/core/dbt/task/compile.py
@@ -10,6 +10,7 @@
 from dbt.graph import ResourceTypeSelector
 from dbt.events.functions import fire_event
 from dbt.events.types import CompileComplete
+from dbt.parser.manifest import write_manifest
 from dbt.node_types import NodeType
 
 
@@ -85,4 +86,4 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
             selected=selected_uids,
         )
         # TODO: is it wrong to write the manifest here? I think it's right...
-        self.write_manifest()
+        write_manifest(self.manifest, self.config.target_path)
diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py
index 87723a530a1..119a32acf42 100644
--- a/core/dbt/task/generate.py
+++ b/core/dbt/task/generate.py
@@ -31,7 +31,7 @@
     CannotGenerateDocs,
     BuildingCatalog,
 )
-from dbt.parser.manifest import ManifestLoader
+from dbt.parser.manifest import write_manifest
 import dbt.utils
 import dbt.compilation
 import dbt.exceptions
@@ -199,11 +199,6 @@ def get_unique_id_mapping(
 
 
 class GenerateTask(CompileTask):
-    def _get_manifest(self) -> Manifest:
-        if self.manifest is None:
-            raise InternalException("manifest should not be None in _get_manifest")
-        return self.manifest
-
     def run(self) -> CatalogArtifact:
         compile_results = None
         if self.args.compile:
@@ -217,8 +212,6 @@ def run(self) -> CatalogArtifact:
                 errors=None,
                 compile_results=compile_results,
             )
-        else:
-            self.manifest = ManifestLoader.get_full_manifest(self.config)
 
         shutil.copyfile(DOCS_INDEX_FILE_PATH, os.path.join(self.config.target_path, "index.html"))
 
@@ -262,7 +255,7 @@ def run(self) -> CatalogArtifact:
         path = os.path.join(self.config.target_path, CATALOG_FILENAME)
         results.write(path)
         if self.args.compile:
-            self.write_manifest()
+            write_manifest(self.manifest, self.config.target_path)
 
         if exceptions:
             fire_event(WriteCatalogFailure(num_exceptions=len(exceptions)))
diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py
index e1be8f214d3..a0b549f620f 100644
--- a/core/dbt/task/list.py
+++ b/core/dbt/task/list.py
@@ -2,7 +2,7 @@
 
 from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric
 from dbt.graph import ResourceTypeSelector
-from dbt.task.runnable import GraphRunnableTask, ManifestTask
+from dbt.task.runnable import GraphRunnableTask
 from dbt.task.test import TestSelector
 from dbt.node_types import NodeType
 from dbt.events.functions import warn_or_error
@@ -40,8 +40,8 @@ class ListTask(GraphRunnableTask):
         )
     )
 
-    def __init__(self, args, config):
-        super().__init__(args, config)
+    def __init__(self, args, config, manifest):
+        super().__init__(args, config, manifest)
         if self.args.models:
             if self.args.select:
                 raise RuntimeException('"models" and "select" are mutually exclusive arguments')
@@ -132,7 +132,7 @@ def generate_paths(self):
                 yield node.original_file_path
 
     def run(self):
-        ManifestTask._runtime_initialize(self)
+        self.compile_manifest()
         output = self.args.output
         if output == "selector":
             generator = self.generate_selectors
diff --git a/core/dbt/task/parse.py b/core/dbt/task/parse.py
deleted file mode 100644
index 5460bf0f3d0..00000000000
--- a/core/dbt/task/parse.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# This task is intended to be used for diagnosis, development and
-# performance analysis.
-# It separates out the parsing flows for easier logging and
-# debugging.
-# To store cProfile performance data, execute with the '-r'
-# flag and an output file: dbt -r dbt.cprof parse.
-# Use a visualizer such as snakeviz to look at the output:
-# snakeviz dbt.cprof
-from dbt.task.base import ConfiguredTask
-from dbt.adapters.factory import get_adapter
-from dbt.parser.manifest import Manifest, ManifestLoader, _check_manifest
-from dbt.logger import DbtProcessState
-from dbt.clients.system import write_file
-from dbt.events.types import (
-    ManifestDependenciesLoaded,
-    ManifestLoaderCreated,
-    ManifestLoaded,
-    ManifestChecked,
-    ManifestFlatGraphBuilt,
-    ParseCmdStart,
-    ParseCmdCompiling,
-    ParseCmdWritingManifest,
-    ParseCmdDone,
-    ParseCmdPerfInfoPath,
-)
-from dbt.events.functions import fire_event
-from dbt.graph import Graph
-import time
-from typing import Optional
-import os
-import json
-import dbt.utils
-
-MANIFEST_FILE_NAME = "manifest.json"
-PERF_INFO_FILE_NAME = "perf_info.json"
-PARSING_STATE = DbtProcessState("parsing")
-
-
-class ParseTask(ConfiguredTask):
-    def __init__(self, args, config):
-        super().__init__(args, config)
-        self.manifest: Optional[Manifest] = None
-        self.graph: Optional[Graph] = None
-        self.loader: Optional[ManifestLoader] = None
-
-    def write_manifest(self):
-        path = os.path.join(self.config.target_path, MANIFEST_FILE_NAME)
-        self.manifest.write(path)
-
-    def write_perf_info(self):
-        path = os.path.join(self.config.target_path, PERF_INFO_FILE_NAME)
-        write_file(path, json.dumps(self.loader._perf_info, cls=dbt.utils.JSONEncoder, indent=4))
-        fire_event(ParseCmdPerfInfoPath(path=path))
-
-    # This method takes code that normally exists in other files
-    # and pulls it in here, to simplify logging and make the
-    # parsing flow-of-control easier to understand and manage,
-    # with the downside that if changes happen in those other methods,
-    # similar changes might need to be made here.
-    # ManifestLoader.get_full_manifest
-    # ManifestLoader.load
-    # ManifestLoader.load_all
-
-    def get_full_manifest(self):
-        adapter = get_adapter(self.config)  # type: ignore
-        root_config = self.config
-        macro_hook = adapter.connections.set_query_header
-        with PARSING_STATE:
-            start_load_all = time.perf_counter()
-            projects = root_config.load_dependencies()
-            fire_event(ManifestDependenciesLoaded())
-            loader = ManifestLoader(root_config, projects, macro_hook)
-            fire_event(ManifestLoaderCreated())
-            manifest = loader.load()
-            fire_event(ManifestLoaded())
-            _check_manifest(manifest, root_config)
-            fire_event(ManifestChecked())
-            manifest.build_flat_graph()
-            fire_event(ManifestFlatGraphBuilt())
-            loader._perf_info.load_all_elapsed = time.perf_counter() - start_load_all
-
-        self.loader = loader
-        self.manifest = manifest
-        fire_event(ManifestLoaded())
-
-    def compile_manifest(self):
-        adapter = get_adapter(self.config)
-        compiler = adapter.get_compiler()
-        self.graph = compiler.compile(self.manifest)
-
-    def run(self):
-        fire_event(ParseCmdStart())
-        self.get_full_manifest()
-        if self.args.compile:
-            fire_event(ParseCmdCompiling())
-            self.compile_manifest()
-        if self.args.write_manifest:
-            fire_event(ParseCmdWritingManifest())
-            self.write_manifest()
-
-        self.write_perf_info()
-        fire_event(ParseCmdDone())
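The deleted header above documents the profiling workflow: `dbt -r dbt.cprof parse`, then snakeviz on the output file. If snakeviz is unavailable, the same cProfile dump can be inspected with the standard library alone — an illustrative alternative, not part of this patch:

    import pstats

    # load the stats file written by `dbt -r dbt.cprof parse`
    stats = pstats.Stats("dbt.cprof")
    stats.sort_stats("cumulative").print_stats(20)  # top 20 calls by cumulative time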
diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py
index bc8f9a2de75..f21dfd570e9 100644
--- a/core/dbt/task/run.py
+++ b/core/dbt/task/run.py
@@ -284,8 +284,8 @@ def execute(self, model, manifest):
 
 
 class RunTask(CompileTask):
-    def __init__(self, args, config):
-        super().__init__(args, config)
+    def __init__(self, args, config, manifest):
+        super().__init__(args, config, manifest)
         self.ran_hooks = []
         self._total_executed = 0
diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py
index 9d7a469efd8..70bf39042f7 100644
--- a/core/dbt/task/run_operation.py
+++ b/core/dbt/task/run_operation.py
@@ -3,12 +3,11 @@
 
 import agate
 
-from .runnable import ManifestTask
+from .base import ConfiguredTask
 
 import dbt.exceptions
 from dbt.adapters.factory import get_adapter
 from dbt.contracts.results import RunOperationResultsArtifact
-from dbt.exceptions import InternalException
 from dbt.events.functions import fire_event
 from dbt.events.types import (
     RunningOperationCaughtError,
@@ -17,7 +16,7 @@
 )
 
 
-class RunOperationTask(ManifestTask):
+class RunOperationTask(ConfiguredTask):
     def _get_macro_parts(self):
         macro_name = self.args.macro
         if "." in macro_name:
@@ -27,10 +26,6 @@ def _get_macro_parts(self):
 
         return package_name, macro_name
 
-    def compile_manifest(self) -> None:
-        if self.manifest is None:
-            raise InternalException("manifest was None in compile_manifest")
-
     def _run_unsafe(self) -> agate.Table:
         adapter = get_adapter(self.config)
 
@@ -47,7 +42,7 @@ def _run_unsafe(self) -> agate.Table:
 
     def run(self) -> RunOperationResultsArtifact:
         start = datetime.utcnow()
-        self._runtime_initialize()
+        self.compile_manifest()
         try:
             self._run_unsafe()
         except dbt.exceptions.Exception as exc:
diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py
index fa8fdb724a8..e86cdbd2973 100644
--- a/core/dbt/task/runnable.py
+++ b/core/dbt/task/runnable.py
@@ -1,6 +1,5 @@
 import os
 import time
-import json
 from pathlib import Path
 from abc import abstractmethod
 from concurrent.futures import as_completed
@@ -13,7 +12,6 @@
     print_run_end_messages,
 )
 
-from dbt.clients.system import write_file
 from dbt.task.base import ConfiguredTask
 from dbt.adapters.base import BaseRelation
 from dbt.adapters.factory import get_adapter
@@ -39,7 +37,6 @@
     NothingToDo,
 )
 from dbt.events.contextvars import log_contextvars
-from dbt.contracts.graph.manifest import Manifest
 from dbt.contracts.graph.nodes import SourceDefinition, ResultNode
 from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus
 from dbt.contracts.state import PreviousState
@@ -50,8 +47,8 @@
     FailFastException,
 )
 
-from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference, Graph
-from dbt.parser.manifest import ManifestLoader
+from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference
+from dbt.parser.manifest import write_manifest
 
 import dbt.tracking
 import dbt.exceptions
@@ -59,53 +56,15 @@
 import dbt.utils
 
 RESULT_FILE_NAME = "run_results.json"
-MANIFEST_FILE_NAME = "manifest.json"
 RUNNING_STATE = DbtProcessState("running")
 
 
-class ManifestTask(ConfiguredTask):
-    def __init__(self, args, config):
-        super().__init__(args, config)
-        self.manifest: Optional[Manifest] = None
-        self.graph: Optional[Graph] = None
-
-    def write_manifest(self):
-        if flags.WRITE_JSON:
-            path = os.path.join(self.config.target_path, MANIFEST_FILE_NAME)
-            self.manifest.write(path)
-        if os.getenv("DBT_WRITE_FILES"):
-            path = os.path.join(self.config.target_path, "files.json")
-            write_file(path, json.dumps(self.manifest.files, cls=dbt.utils.JSONEncoder, indent=4))
-
-    def load_manifest(self):
-        self.manifest = ManifestLoader.get_full_manifest(self.config)
-        self.write_manifest()
-
-    def compile_manifest(self):
-        if self.manifest is None:
-            raise InternalException("compile_manifest called before manifest was loaded")
-
-        # we cannot get adapter in init since it will break rpc #5579
-        adapter = get_adapter(self.config)
-        compiler = adapter.get_compiler()
-        self.graph = compiler.compile(self.manifest)
-
-    def _runtime_initialize(self):
-        self.load_manifest()
-
-        start_compile_manifest = time.perf_counter()
-        self.compile_manifest()
-        compile_time = time.perf_counter() - start_compile_manifest
-        if dbt.tracking.active_user is not None:
-            dbt.tracking.track_runnable_timing({"graph_compilation_elapsed": compile_time})
-
-
-class GraphRunnableTask(ManifestTask):
+class GraphRunnableTask(ConfiguredTask):
 
     MARK_DEPENDENT_ERRORS_STATUSES = [NodeStatus.Error]
 
-    def __init__(self, args, config):
-        super().__init__(args, config)
+    def __init__(self, args, config, manifest):
+        super().__init__(args, config, manifest)
         self.job_queue: Optional[GraphQueue] = None
         self._flattened_nodes: Optional[List[ResultNode]] = None
@@ -165,9 +124,9 @@ def get_graph_queue(self) -> GraphQueue:
         return selector.get_graph_queue(spec)
 
     def _runtime_initialize(self):
-        super()._runtime_initialize()
+        self.compile_manifest()
         if self.manifest is None or self.graph is None:
-            raise InternalException("_runtime_initialize never loaded the manifest and graph!")
+            raise InternalException("_runtime_initialize never loaded the graph!")
 
         self.job_queue = self.get_graph_queue()
@@ -490,7 +449,7 @@ def run(self):
             )
 
         if flags.WRITE_JSON:
-            self.write_manifest()
+            write_manifest(self.manifest, self.config.target_path)
 
         self.write_result(result)
         self.task_end_messages(result.results)
diff --git a/test/unit/test_config.py b/test/unit/test_config.py
index 456f16fade6..d45ee86587d 100644
--- a/test/unit/test_config.py
+++ b/test/unit/test_config.py
@@ -12,15 +12,15 @@
 
 import dbt.config
 import dbt.exceptions
+import dbt.tracking
 from dbt import flags
 from dbt.adapters.factory import load_plugin
 from dbt.adapters.postgres import PostgresCredentials
-from dbt.context.base import generate_base_context
 from dbt.contracts.connection import QueryComment, DEFAULT_QUERY_COMMENT
 from dbt.contracts.project import PackageConfig, LocalPackage, GitPackage
 from dbt.node_types import NodeType
 from dbt.semver import VersionSpecifier
-from dbt.task.run_operation import RunOperationTask
+from dbt.task.base import ConfiguredTask
 
 from .utils import normalize
 
@@ -909,7 +909,12 @@ def test_with_invalid_package(self):
             dbt.config.Project.from_project_root(self.project_dir, renderer)
 
 
-class TestRunOperationTask(BaseFileTest):
+class InheritsFromConfiguredTask(ConfiguredTask):
+    def run(self):
+        pass
+
+
+class TestConfiguredTask(BaseFileTest):
     def setUp(self):
         super().setUp()
         self.write_project(self.default_project_data)
@@ -921,17 +926,17 @@ def tearDown(self):
         # so it's necessary to change it back at the end.
         os.chdir(INITIAL_ROOT)
 
-    def test_run_operation_task(self):
+    def test_configured_task_dir_change(self):
         self.assertEqual(os.getcwd(), INITIAL_ROOT)
         self.assertNotEqual(INITIAL_ROOT, self.project_dir)
-        new_task = RunOperationTask.from_args(self.args)
+        new_task = InheritsFromConfiguredTask.from_args(self.args)
         self.assertEqual(os.path.realpath(os.getcwd()), os.path.realpath(self.project_dir))
 
-    def test_run_operation_task_with_bad_path(self):
+    def test_configured_task_dir_change_with_bad_path(self):
         self.args.project_dir = 'bad_path'
         with self.assertRaises(dbt.exceptions.RuntimeException):
-            new_task = RunOperationTask.from_args(self.args)
+            new_task = InheritsFromConfiguredTask.from_args(self.args)
 
 
 class TestVariableProjectFile(BaseFileTest):

From d27016a4e7568f477ee04d98973a18ef83f63d77 Mon Sep 17 00:00:00 2001
From: Stu Kilgore
Date: Wed, 25 Jan 2023 14:52:57 -0600
Subject: [PATCH 26/54] Migrate debug task to click (#6728)

---
 .../Under the Hood-20230125-102606.yaml | 6 ++++++
 core/dbt/cli/main.py | 14 ++++++++++++--
 .../docs/build/doctrees/environment.pickle | Bin 205777 -> 205777 bytes
 core/dbt/task/debug.py | 11 ++++-------
 4 files changed, 22 insertions(+), 9 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230125-102606.yaml

diff --git a/.changes/unreleased/Under the Hood-20230125-102606.yaml b/.changes/unreleased/Under the Hood-20230125-102606.yaml
new file mode 100644
index 00000000000..9e5cba8e28b
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230125-102606.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Migrate debug task to click
+time: 2023-01-25T10:26:06.735994-06:00
+custom:
+  Author: stu-k
+  Issue: "5546"
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 1adc432daf2..12e7d6f13a0 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -11,6 +11,7 @@
 from dbt.task.clean import CleanTask
 from dbt.task.compile import CompileTask
 from dbt.task.deps import DepsTask
+from dbt.task.debug import DebugTask
 from dbt.task.run import RunTask
 from dbt.task.test import TestTask
 from dbt.task.snapshot import SnapshotTask
@@ -258,10 +259,19 @@ def compile(ctx, **kwargs):
 @p.vars
 @p.version_check
 @requires.preflight
+@requires.profile
+@requires.project
+@requires.runtime_config
 def debug(ctx, **kwargs):
     """Show some helpful information about dbt for debugging.
 
     Not to be confused with the --debug option which increases verbosity."""
-    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}")
-    return None, True
+    task = DebugTask(
+        ctx.obj["flags"],
+        ctx.obj["runtime_config"],
+    )
+
+    results = task.run()
+    success = task.interpret_results(results)
+    return results, success


 # dbt deps
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index 3c70ad3fe4e42789208e299ae1de237e46920ac0..b4efa645e9d65c43bc0f22b1e035172c83a3a4c2 100644
GIT binary patch
delta 33
ncmcb3oaf?ko()a%Y+u6kB@gE`cgVMQ$TI>l)AkN|W=|FX_&5xh

delta 33
ncmcb3oaf?ko()a%Y@au4#ww&XcgVMQ$TI>l)AkN|W=|FX_*x8z

diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py
index 853a01ebd1e..ab986f2b270 100644
--- a/core/dbt/task/debug.py
+++ b/core/dbt/task/debug.py
@@ -12,7 +12,6 @@
 from dbt.adapters.factory import get_adapter, register_adapter
 from dbt.config import PartialProject, Project, Profile
 from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer
-from dbt.config.utils import parse_cli_vars
 from dbt.clients.yaml_helper import load_yaml_text
 from dbt.links import ProfileConfigDocs
 from dbt.ui import green, red
@@ -71,11 +70,7 @@ def __init__(self, args, config):
         else:
             self.project_dir = os.getcwd()
         self.project_path = os.path.join(self.project_dir, "dbt_project.yml")
-        # N.B. parse_cli_vars is embedded into the param when using click.
-        # replace this with:
-        # cli_vars: Dict[str, Any] = getattr(args, "vars", {})
-        # when this task is refactored for click
-        self.cli_vars = parse_cli_vars(getattr(self.args, "vars", "{}"))
+        self.cli_vars: Dict[str, Any] = args.vars
 
         # set by _load_*
         self.profile: Optional[Profile] = None
@@ -258,7 +253,9 @@ def _load_profile(self):
                     profile_name,
                     self.args.profile,
                     self.args.target,
-                    self.args.threads,
+                    # TODO: Generalize safe access to flags.THREADS:
+                    # https://github.com/dbt-labs/dbt-core/issues/6259
+                    getattr(self.args, "threads", None),
                 )
             except dbt.exceptions.DbtConfigError as exc:
                 profile_errors.append(str(exc))
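The debug migration above, together with the init and docs migrations that follow, converges on one command-to-task shape. A condensed sketch of that shared pattern — the `run_task` helper is illustrative only; the patches inline it per command:

    def run_task(task_cls, ctx, *extra):
        # build the task from objects the @requires decorators stashed on ctx.obj,
        # run it, and let the task grade its own results
        task = task_cls(ctx.obj["flags"], ctx.obj.get("runtime_config"), *extra)
        results = task.run()
        success = task.interpret_results(results)
        return results, success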
From 7fa61f08164dc9646c16ed0d2c7f5aec99ea3d15 Mon Sep 17 00:00:00 2001
From: Michelle Ark
Date: Thu, 26 Jan 2023 12:45:15 -0500
Subject: [PATCH 27/54] dbt init works with click (#6698)

dbt init works with click
---
 .changes/unreleased/Under the Hood-20230124-175110.yaml | 6 ++++++
 core/dbt/cli/main.py | 8 ++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230124-175110.yaml

diff --git a/.changes/unreleased/Under the Hood-20230124-175110.yaml b/.changes/unreleased/Under the Hood-20230124-175110.yaml
new file mode 100644
index 00000000000..9d8e5b35907
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230124-175110.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: dbt init works with click
+time: 2023-01-24T17:51:10.74065-05:00
+custom:
+  Author: michelleark
+  Issue: "5548"
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 12e7d6f13a0..f5ea1095cc7 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -21,6 +21,7 @@
 from dbt.task.run_operation import RunOperationTask
 from dbt.task.build import BuildTask
 from dbt.task.generate import GenerateTask
+from dbt.task.init import InitTask
 
 
 # CLI invocation
@@ -307,8 +308,11 @@
 @requires.preflight
 def init(ctx, **kwargs):
     """Initialize a new dbt project."""
-    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}")
-    return None, True
+    task = InitTask(ctx.obj["flags"], None)
+
+    results = task.run()
+    success = task.interpret_results(results)
+    return results, success
 
 
 # dbt list

From 08b2d94ccd7904879c062ed801ea61fdda0d2603 Mon Sep 17 00:00:00 2001
From: Kshitij Aranke
Date: Thu, 26 Jan 2023 10:42:49 -0800
Subject: =?UTF-8?q?[CT-920][CT-1900]=20Create=20Click=20CLI?=
 =?UTF-8?q?=20runner=20and=20use=20it=20to=20fix=20dbt=20docs=20=E2=80=A6?=
 =?UTF-8?q?=20(#6723)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Github Build Bot
---
 .../Under the Hood-20230125-041136.yaml | 6 +
 core/dbt/cli/main.py | 23 +++-
 core/dbt/events/proto_types.py | 24 ----
 core/dbt/events/types.py | 27 ----
 core/dbt/task/serve.py | 46 ++-----
 tests/functional/minimal_cli/fixtures.py | 120 ++++++++++++++++++
 .../minimal_cli/test_minimal_cli.py | 49 +++++++
 tests/unit/test_events.py | 32 ++---
 8 files changed, 218 insertions(+), 109 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230125-041136.yaml
 create mode 100644 tests/functional/minimal_cli/fixtures.py
 create mode 100644 tests/functional/minimal_cli/test_minimal_cli.py

diff --git a/.changes/unreleased/Under the Hood-20230125-041136.yaml b/.changes/unreleased/Under the Hood-20230125-041136.yaml
new file mode 100644
index 00000000000..ff02d3ef752
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230125-041136.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: '[CT-920][CT-1900] Create Click CLI runner and use it to fix dbt docs commands'
+time: 2023-01-25T04:11:36.57506-08:00
+custom:
+  Author: aranke
+  Issue: 5544 6722
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index f5ea1095cc7..272334fe303 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -13,6 +13,7 @@
 from dbt.task.deps import DepsTask
 from dbt.task.debug import DebugTask
 from dbt.task.run import RunTask
+from dbt.task.serve import ServeTask
 from dbt.task.test import TestTask
 from dbt.task.snapshot import SnapshotTask
 from dbt.task.seed import SeedTask
@@ -172,6 +173,7 @@ def docs(ctx, **kwargs):
 @p.models
 @p.profile
 @p.profiles_dir
+@p.project_dir
 @p.select
 @p.selector
 @p.state
@@ -187,7 +189,11 @@
 @requires.manifest
 def docs_generate(ctx, **kwargs):
     """Generate the documentation website for your project"""
-    task = GenerateTask(ctx.obj["flags"], ctx.obj["runtime_config"])
+    task = GenerateTask(
+        ctx.obj["flags"],
+        ctx.obj["runtime_config"],
+        ctx.obj["manifest"],
+    )
 
     results = task.run()
     success = task.interpret_results(results)
@@ -205,10 +211,21 @@ def docs_generate(ctx, **kwargs):
 @p.target
 @p.vars
 @requires.preflight
+@requires.profile
+@requires.project
+@requires.runtime_config
+@requires.manifest
 def docs_serve(ctx, **kwargs):
     """Serve the documentation website for your project"""
-    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {ctx.obj['flags']}")
-    return None, True
+    task = ServeTask(
+        ctx.obj["flags"],
+        ctx.obj["runtime_config"],
+        ctx.obj["manifest"],
+    )
+
+    results = task.run()
+    success = task.interpret_results(results)
+    return results, success
 
 
 # dbt compile
diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py
index 5ee384643d3..37fe69453f6 100644
--- a/core/dbt/events/proto_types.py
+++ b/core/dbt/events/proto_types.py
@@ -1971,30 +1971,6 @@ class EmptyLine(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
 
 
-@dataclass
-class ServingDocsPort(betterproto.Message):
-    """Z018"""
-
-    info: "EventInfo" = betterproto.message_field(1)
-    address: str = betterproto.string_field(2)
-    port: int = betterproto.int32_field(3)
-
-
-@dataclass
-class ServingDocsAccessInfo(betterproto.Message):
-    """Z019"""
-
-    info: "EventInfo" = betterproto.message_field(1)
-    port: str = betterproto.string_field(2)
-
-
-@dataclass
-class ServingDocsExitInfo(betterproto.Message):
-    """Z020"""
-
-    info: "EventInfo" = betterproto.message_field(1)
-
-
 @dataclass
 class RunResultWarning(betterproto.Message):
     """Z021"""
diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py
index 0a0cd04fe1d..f56aaf51e4e 100644
--- a/core/dbt/events/types.py
+++ b/core/dbt/events/types.py
@@ -2463,33 +2463,6 @@ def message(self) -> str:
         return ""
 
 
-@dataclass
-class ServingDocsPort(InfoLevel, pt.ServingDocsPort):
-    def code(self):
-        return "Z018"
-
-    def message(self) -> str:
-        return f"Serving docs at {self.address}:{self.port}"
-
-
-@dataclass
-class ServingDocsAccessInfo(InfoLevel, pt.ServingDocsAccessInfo):
-    def code(self):
-        return "Z019"
-
-    def message(self) -> str:
-        return f"To access from your browser, navigate to: http://localhost:{self.port}"
-
-
-@dataclass
-class ServingDocsExitInfo(InfoLevel, pt.ServingDocsExitInfo):
-    def code(self):
-        return "Z020"
-
-    def message(self) -> str:
-        return "Press Ctrl+C to exit."
-
-
 @dataclass
 class RunResultWarning(WarnLevel, pt.RunResultWarning):
     def code(self):
diff --git a/core/dbt/task/serve.py b/core/dbt/task/serve.py
index 4d702234d0e..696be89a37f 100644
--- a/core/dbt/task/serve.py
+++ b/core/dbt/task/serve.py
@@ -1,46 +1,28 @@
-import shutil
 import os
+import shutil
+import socketserver
 import webbrowser
-
-from dbt.include.global_project import DOCS_INDEX_FILE_PATH
 from http.server import SimpleHTTPRequestHandler
-from socketserver import TCPServer
 
-from dbt.events.functions import fire_event
-from dbt.events.types import ServingDocsPort, ServingDocsAccessInfo, ServingDocsExitInfo, EmptyLine
+import click
+
+from dbt.include.global_project import DOCS_INDEX_FILE_PATH
 from dbt.task.base import ConfiguredTask
 
 
 class ServeTask(ConfiguredTask):
     def run(self):
         os.chdir(self.config.target_path)
-
-        port = self.args.port
-        address = "0.0.0.0"
-
         shutil.copyfile(DOCS_INDEX_FILE_PATH, "index.html")
-
-        fire_event(ServingDocsPort(address=address, port=port))
-        fire_event(ServingDocsAccessInfo(port=port))
-        fire_event(EmptyLine())
-        fire_event(EmptyLine())
-        fire_event(ServingDocsExitInfo())
-
-        # mypy doesn't think SimpleHTTPRequestHandler is ok here, but it is
-        httpd = TCPServer(  # type: ignore
-            (address, port), SimpleHTTPRequestHandler  # type: ignore
-        )  # type: ignore
-
-        if self.args.open_browser:
-            try:
-                webbrowser.open_new_tab(f"http://127.0.0.1:{port}")
-            except webbrowser.Error:
-                pass
+        port = self.args.port
 
-        try:
-            httpd.serve_forever()  # blocks
-        finally:
-            httpd.shutdown()
-            httpd.server_close()
+        if self.args.browser:
+            webbrowser.open_new_tab(f"http://localhost:{port}")
 
-        return None
+        with socketserver.TCPServer(("", port), SimpleHTTPRequestHandler) as httpd:
+            click.echo(f"Serving docs at {port}")
+            click.echo(f"To access from your browser, navigate to: http://localhost:{port}")
+            click.echo("\n\n")
+            click.echo("Press Ctrl+C to exit.")
+            httpd.serve_forever()
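One note on the rewritten ServeTask: using socketserver.TCPServer as a context manager (supported since Python 3.6) replaces the old try/finally, because __exit__ calls server_close() even when serve_forever() is interrupted. The equivalence in isolation, with a hypothetical port:

    import socketserver
    from http.server import SimpleHTTPRequestHandler

    # context-manager form: server_close() runs on exit, interrupted or not
    with socketserver.TCPServer(("", 8080), SimpleHTTPRequestHandler) as httpd:
        httpd.serve_forever()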
diff --git a/tests/functional/minimal_cli/fixtures.py b/tests/functional/minimal_cli/fixtures.py
new file mode 100644
index 00000000000..ac746389c6d
--- /dev/null
+++ b/tests/functional/minimal_cli/fixtures.py
@@ -0,0 +1,120 @@
+import pytest
+
+models__schema_yml = """
+version: 2
+models:
+  - name: sample_model
+    columns:
+      - name: sample_num
+        tests:
+          - accepted_values:
+              values: [1, 2]
+          - not_null
+      - name: sample_bool
+        tests:
+          - not_null
+          - unique
+"""
+
+models__sample_model = """
+select * from {{ ref('sample_seed') }}
+"""
+
+snapshots__sample_snapshot = """
+{% snapshot orders_snapshot %}
+
+{{
+    config(
+        target_database='postgres',
+        target_schema='snapshots',
+        unique_key='sample_num',
+        strategy='timestamp',
+        updated_at='updated_at',
+    )
+}}
+
+select * from {{ ref('sample_model') }}
+
+{% endsnapshot %}
+"""
+
+seeds__sample_seed = """sample_num,sample_bool
+1,true
+2,false
+,true
+"""
+
+tests__failing_sql = """
+{{ config(severity = 'warn') }}
+select 1
+"""
+
+
+class BaseConfigProject:
+
+    @pytest.fixture(scope="class")
+    def project_config_update(self):
+        return {
+            "name": "jaffle_shop",
+            "profile": "jaffle_shop",
+            "version": "0.1.0",
+            "config-version": 2,
+            "clean-targets": [
+                "target",
+                "dbt_packages",
+                "logs"
+            ]
+        }
+
+    @pytest.fixture(scope="class")
+    def profiles_config_update(self):
+        return {
+            "jaffle_shop": {
+                "outputs": {
+                    "dev": {
+                        "type": "postgres",
+                        "database": "postgres",
+                        "schema": "jaffle_shop",
+                        "host": "localhost",
+                        "user": "root",
+                        "port": 5432,
+                        "password": "password"
+                    }
+                },
+                "target": "dev"
+            }
+        }
+
+    @pytest.fixture(scope="class")
+    def packages(self):
+        return {
+            "packages": [
+                {
+                    "package": "dbt-labs/dbt_utils",
+                    "version": "1.0.0"
+                }
+            ]
+        }
+
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "schema.yml": models__schema_yml,
+            "sample_model.sql": models__sample_model,
+        }
+
+    @pytest.fixture(scope="class")
+    def snapshots(self):
+        return {
+            "sample_snapshot.sql": snapshots__sample_snapshot
+        }
+
+    @pytest.fixture(scope="class")
+    def seeds(self):
+        return {"sample_seed.csv": seeds__sample_seed}
+
+    @pytest.fixture(scope="class")
+    def tests(self):
+        return {
+            "failing.sql": tests__failing_sql,
+        }
diff --git a/tests/functional/minimal_cli/test_minimal_cli.py b/tests/functional/minimal_cli/test_minimal_cli.py
new file mode 100644
index 00000000000..a87c0c95f93
--- /dev/null
+++ b/tests/functional/minimal_cli/test_minimal_cli.py
@@ -0,0 +1,49 @@
+import pytest
+from click.testing import CliRunner
+
+from dbt.cli.main import cli
+from tests.functional.minimal_cli.fixtures import BaseConfigProject
+
+
+class TestMinimalCli(BaseConfigProject):
+    """Test the minimal/happy-path for the CLI using the Click CliRunner"""
+
+    @pytest.fixture(scope="class")
+    def runner(self):
+        return CliRunner()
+
+    def test_clean(self, runner, project):
+        result = runner.invoke(cli, ['clean'])
+        assert 'target' in result.output
+        assert 'dbt_packages' in result.output
+        assert 'logs' in result.output
+
+    def test_deps(self, runner, project):
+        result = runner.invoke(cli, ['deps'])
+        assert 'dbt-labs/dbt_utils' in result.output
+        assert '1.0.0' in result.output
+
+    def test_ls(self, runner, project):
+        runner.invoke(cli, ['deps'])
+        ls_result = runner.invoke(cli, ['ls'])
+        assert '1 seed' in ls_result.output
+        assert '1 model' in ls_result.output
+        assert '5 tests' in ls_result.output
+        assert '1 snapshot' in ls_result.output
+
+    def test_build(self, runner, project):
+        runner.invoke(cli, ['deps'])
+        result = runner.invoke(cli, ['build'])
+        # 1 seed, 1 model, 2 tests
+        assert 'PASS=4' in result.output
+        # 2 tests
+        assert 'ERROR=2' in result.output
+        # Singular test
+        assert 'WARN=1' in result.output
+        # 1 snapshot
+        assert 'SKIP=1' in result.output
+
+    def test_docs_generate(self, runner, project):
+        runner.invoke(cli, ['deps'])
+        result = runner.invoke(cli, ['docs', 'generate'])
+        assert 'Building catalog' in result.output
+        assert 'Catalog written' in result.output
diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py
index 3dbff04c303..935c3421607 100644
--- a/tests/unit/test_events.py
+++ b/tests/unit/test_events.py
@@ -1,29 +1,18 @@
 # flake8: noqa
-from dbt.events.test_types import UnitTestInfo
-from dbt.events import AdapterLogger
-from dbt.events.functions import event_to_json, LOG_VERSION, event_to_dict
-from dbt.events.types import *
-from dbt.events.test_types import *
+import re
+from typing import TypeVar
 
+from dbt.contracts.files import FileHash
+from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn
+from dbt.events import AdapterLogger
 from dbt.events.base_types import (
     BaseEvent,
-    DebugLevel,
-    WarnLevel,
-    InfoLevel,
-    ErrorLevel,
     TestLevel,
 )
-from dbt.events.proto_types import ListOfStrings, NodeInfo, RunResultMsg, ReferenceKeyMsg
-from importlib import reload
-import dbt.events.functions as event_funcs
-import dbt.flags as flags
-import inspect
-import json
-from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn
-from dbt.contracts.files import FileHash
-from mashumaro.types import SerializableType
-from typing import Generic, TypeVar, Dict
-import re
+from dbt.events.functions import event_to_json, event_to_dict
+from dbt.events.test_types import *
+from dbt.events.types import *
 
 
 # takes in a class and finds any subclasses for it
 def get_all_subclasses(cls):
@@ -459,9 +448,6 @@ def MockNode():
     FinishedCleanPaths(),
     OpenCommand(open_cmd="", profiles_dir=""),
     EmptyLine(),
-    ServingDocsPort(address="", port=0),
-    ServingDocsAccessInfo(port=""),
-    ServingDocsExitInfo(),
     RunResultWarning(resource_type="", node_name="", path=""),
     RunResultFailure(resource_type="", node_name="", path=""),
     StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}),
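The CliRunner tests above assert only on captured output text; when extending them, checking the runner's exit code is a common complement (illustrative, not in the patch):

    from click.testing import CliRunner
    from dbt.cli.main import cli

    runner = CliRunner()
    result = runner.invoke(cli, ['deps'])
    assert result.exit_code == 0  # non-zero would indicate the command raised or failed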
From d6ac340df09762040ef737c0463c418b78be5818 Mon Sep 17 00:00:00 2001
From: Ian Knox <81931810+iknox-fa@users.noreply.github.com>
Date: Fri, 27 Jan 2023 15:07:30 -0600
Subject: [PATCH 29/54] Merge `main` into `feature/click-cli` (#6761)

---
 .bumpversion.cfg | 6 +-
 .changes/0.0.0.md | 1 +
 .../unreleased/Features-20230107-003157.yaml | 6 +
 .../unreleased/Features-20230118-233801.yaml | 6 +
 .../unreleased/Features-20230120-112921.yaml | 6 +
 .../unreleased/Fixes-20230116-123645.yaml | 6 +
 .../unreleased/Fixes-20230117-101342.yaml | 6 +
 .../unreleased/Fixes-20230124-115837.yaml | 6 +
 .../unreleased/Fixes-20230124-141943.yaml | 6 +
 .../unreleased/Fixes-20230125-191739.yaml | 6 +
 .../Under the Hood-20230113-132513.yaml | 6 +
 .../Under the Hood-20230113-150700.yaml | 6 +
 .../Under the Hood-20230117-111737.yaml | 6 +
 .../Under the Hood-20230120-172254.yaml | 7 +
 .../Under the Hood-20230122-215235.yaml | 6 +
 .../Under the Hood-20230124-153553.yaml | 6 +
 .../Under the Hood-20230126-135939.yaml | 6 +
 .flake8 | 2 +-
 .github/_README.md | 20 +-
 .github/actions/latest-wrangler/main.py | 17 +-
 .github/workflows/main.yml | 12 +-
 .github/workflows/nightly-release.yml | 109 +
 .github/workflows/release-branch-tests.yml | 2 +-
 .github/workflows/release-docker.yml | 12 +-
 .github/workflows/release.yml | 350 +--
 .github/workflows/version-bump.yml | 2 +-
 CHANGELOG.md | 92 +-
 CONTRIBUTING.md | 7 +-
 Makefile | 9 +-
 core/dbt/adapters/base/column.py | 12 +-
 core/dbt/adapters/base/connections.py | 88 +-
 core/dbt/adapters/base/impl.py | 129 +-
 core/dbt/adapters/base/plugin.py | 4 +-
 core/dbt/adapters/base/query_headers.py | 4 +-
 core/dbt/adapters/base/relation.py | 18 +-
 core/dbt/adapters/cache.py | 97 +-
 core/dbt/adapters/factory.py | 12 +-
 core/dbt/adapters/sql/connections.py | 10 +-
 core/dbt/adapters/sql/impl.py | 4 +-
 core/dbt/cli/main.py | 2 +-
 core/dbt/cli/option_types.py | 19 +-
 core/dbt/cli/params.py | 21 +-
 core/dbt/clients/_jinja_blocks.py | 28 +-
 core/dbt/clients/agate_helper.py | 4 +-
 core/dbt/clients/git.py | 8 +-
 core/dbt/clients/jinja.py | 48 +-
 core/dbt/clients/jinja_static.py | 6 +-
 core/dbt/clients/system.py | 14 +-
 core/dbt/clients/yaml_helper.py | 2 +-
 core/dbt/compilation.py | 41 +-
 core/dbt/config/profile.py | 22 +-
 core/dbt/config/project.py | 32 +-
 core/dbt/config/renderer.py | 11 +-
 core/dbt/config/runtime.py | 18 +-
 core/dbt/config/selectors.py | 8 +-
 core/dbt/config/utils.py | 18 +-
 core/dbt/context/base.py | 23 +-
 core/dbt/context/configured.py | 6 +-
 core/dbt/context/context_config.py | 8 +-
 core/dbt/context/docs.py | 8 +-
 core/dbt/context/exceptions_jinja.py | 70 +-
 core/dbt/context/macro_resolver.py | 6 +-
 core/dbt/context/macros.py | 6 +-
 core/dbt/context/providers.py | 128 +-
 core/dbt/context/secret.py | 4 +-
 core/dbt/contracts/connection.py | 4 +-
 core/dbt/contracts/graph/manifest.py | 30 +-
 core/dbt/contracts/graph/model_config.py | 16 +-
 core/dbt/contracts/graph/nodes.py | 6 +-
 core/dbt/contracts/graph/unparsed.py | 18 +-
 core/dbt/contracts/project.py | 1 +
 core/dbt/contracts/relation.py | 14 +-
 core/dbt/contracts/results.py | 11 +-
 core/dbt/contracts/state.py | 10 +-
 core/dbt/contracts/util.py | 16 +-
 core/dbt/deps/git.py | 4 +-
 core/dbt/deps/registry.py | 18 +-
 core/dbt/deps/resolver.py | 16 +-
 core/dbt/docs/build/html/searchindex.js | 2 +-
 core/dbt/events/base_types.py | 71 +-
 core/dbt/events/eventmgr.py | 64 +-
 core/dbt/events/functions.py | 79 +-
 core/dbt/events/proto_types.py | 2035 +++++++++++------
 core/dbt/events/types.proto | 1885 ++++++++++-----
 core/dbt/events/types.py | 813 ++-----
 core/dbt/exceptions.py | 923 +++++---
 core/dbt/flags.py | 78 +-
 core/dbt/graph/cli.py | 33 +-
 core/dbt/graph/graph.py | 6 +-
 core/dbt/graph/queue.py | 11 +-
 core/dbt/graph/selector.py | 53 +-
 core/dbt/graph/selector_methods.py | 30 +-
 core/dbt/graph/selector_spec.py | 17 +-
 core/dbt/helper_types.py | 66 +-
 .../macros/adapters/freshness.sql | 2 +-
 .../models/incremental/merge.sql | 6 +-
 .../macros/python_model/python.sql | 17 +-
 core/dbt/internal_deprecations.py | 26 +
 core/dbt/lib.py | 4 +-
 core/dbt/main.py | 65 +-
 core/dbt/parser/base.py | 10 +-
 core/dbt/parser/generic_test.py | 8 +-
 core/dbt/parser/generic_test_builders.py | 46 +-
 core/dbt/parser/hooks.py | 4 +-
 core/dbt/parser/macros.py | 8 +-
 core/dbt/parser/manifest.py | 128 +-
 core/dbt/parser/models.py | 91 +-
 core/dbt/parser/partial.py | 36 +-
 core/dbt/parser/read_files.py | 8 +-
 core/dbt/parser/schemas.py | 108 +-
 core/dbt/parser/search.py | 6 +-
 core/dbt/parser/snapshots.py | 4 +-
 core/dbt/parser/sources.py | 4 +-
 core/dbt/parser/sql.py | 4 +-
 core/dbt/semver.py | 18 +-
 core/dbt/task/base.py | 72 +-
 core/dbt/task/build.py | 6 +-
 core/dbt/task/compile.py | 11 +-
 core/dbt/task/debug.py | 67 +-
 core/dbt/task/deps.py | 4 +-
 core/dbt/task/freshness.py | 26 +-
 core/dbt/task/generate.py | 8 +-
 core/dbt/task/init.py | 2 +-
 core/dbt/task/list.py | 53 +-
 core/dbt/task/printer.py | 12 +-
 core/dbt/task/run.py | 63 +-
 core/dbt/task/runnable.py | 51 +-
 core/dbt/task/seed.py | 22 +-
 core/dbt/task/snapshot.py | 13 +-
 core/dbt/task/sql.py | 4 +-
 core/dbt/task/test.py | 35 +-
 core/dbt/tests/fixtures/project.py | 16 +-
 core/dbt/tests/util.py | 7 +-
 core/dbt/tracking.py | 4 +-
 core/dbt/utils.py | 26 +-
 core/dbt/version.py | 6 +-
 core/setup.py | 6 +-
 docker/Dockerfile | 12 +-
 .../dbt/adapters/postgres/__version__.py | 2 +-
 .../dbt/adapters/postgres/connections.py | 6 +-
 .../postgres/dbt/adapters/postgres/impl.py | 20 +-
 .../dbt/adapters/postgres/relation.py | 4 +-
 plugins/postgres/setup.py | 2 +-
 pyproject.toml | 2 +-
 scripts/env-setup.sh | 6 +
 .../models/materialized.sql | 9 -
 .../018_adapter_ddl_tests/seed.sql | 110 -
 .../018_adapter_ddl_tests/test_adapter_ddl.py | 23 -
 .../022_timezones_tests/models/timezones.sql | 10 -
 .../022_timezones_tests/test_timezones.py | 52 -
 .../model-compilation-error/bad_ref.sql | 2 -
 .../models/example.sql | 2 -
 .../models/example_2.sql | 4 -
 .../models/model_error.sql | 2 -
 .../models/schema.yml | 12 -
 .../models/snapshottable.sql | 4 -
 .../seeds/example_seed.csv | 2 -
 .../033_event_tracking_tests/snapshots/a.sql | 4 -
 .../033_event_tracking_tests/test_events.py | 986 --------
 .../035_docs_blocks_tests/test_docs_blocks.py | 184 --
 .../models/my_model.sql | 7 -
 .../standalone_models/my_model.sql | 2 -
 .../test_external_reference.py | 78 -
 .../038_caching_tests/test_caching.py | 67 -
 test/integration/040_init_tests/test_init.py | 755 ------
 .../macros-configs/macros.sql | 17 -
 .../macros/macros.sql | 17 -
 .../models/model1.sql | 3 -
 .../models/model2.sql | 3 -
 .../models/schema.yml | 15 -
 .../test_custom_aliases.py | 39 -
 .../045_test_severity_tests/models/model.sql | 1 -
 .../045_test_severity_tests/models/schema.yml | 19 -
 .../seeds/null_seed.csv | 21 -
 .../045_test_severity_tests/test_severity.py | 93 -
 .../045_test_severity_tests/tests/data.sql | 2 -
 .../models-unquoted/model.sql | 12 -
 .../052_column_quoting_tests/models/model.sql | 12 -
 .../052_column_quoting_tests/seeds/seed.csv | 4 -
 .../test_column_quotes.py | 78 -
 .../macros/ref_override_macro.sql | 4 -
 .../models/ref_override.sql | 3 -
 .../055_ref_override_tests/seeds/seed_1.csv | 4 -
 .../055_ref_override_tests/seeds/seed_2.csv | 4 -
 .../test_ref_override.py | 30 -
 .../macros/test_alter_column_type.sql | 5 -
 .../056_column_type_tests/pg_models/model.sql | 9 -
 .../pg_models/schema.yml | 14 -
 .../test_alter_column_types.py | 13 -
 .../test_column_types.py | 22 -
 .../057_run_query_tests/test_pg_types.py | 25 -
 .../models-column-missing/missing_column.sql | 2 -
 .../models-column-missing/schema.yaml | 8 -
 .../models/my_fun_docs.md | 10 -
 .../models/no_docs_model.sql | 1 -
 .../models/table_model.sql | 2 -
 .../models/view_model.sql | 2 -
 .../060_persist_docs_tests/seeds/seed.csv | 3 -
 .../test_persist_docs.py | 126 -
 .../changed_models/ephemeral_model.sql | 2 -
 .../changed_models/schema.yml | 9 -
 .../changed_models/table_model.sql | 5 -
 .../changed_models/view_model.sql | 1 -
 .../changed_models_bad/ephemeral_model.sql | 2 -
 .../changed_models_bad/schema.yml | 9 -
 .../changed_models_bad/table_model.sql | 5 -
 .../changed_models_bad/view_model.sql | 1 -
 .../changed_models_missing/schema.yml | 9 -
 .../changed_models_missing/table_model.sql | 2 -
 .../changed_models_missing/view_model.sql | 1 -
 .../macros/infinite_macros.sql | 13 -
 .../062_defer_state_tests/macros/macros.sql | 3 -
 .../models/ephemeral_model.sql | 2 -
 .../models/exposures.yml | 8 -
 .../062_defer_state_tests/models/schema.yml | 10 -
 .../models/table_model.sql | 5 -
 .../models/view_model.sql | 4 -
 .../previous_state/manifest.json | 6 -
 .../062_defer_state_tests/seeds/seed.csv | 3 -
 .../snapshots/my_snapshot.sql | 14 -
 .../062_defer_state_tests/test_defer_state.py | 344 ---
 .../test_modified_state.py | 211 --
 .../test_run_results_state.py | 436 ----
 .../models/quote_model.sql | 1 -
 .../models/schema.yml | 9 -
 .../test_column_comments.py | 43 -
 .../local_dependency/dbt_project.yml | 23 -
 .../local_dependency/macros/dep_macro.sql | 3 -
 .../models/model_to_import.sql | 1 -
 .../local_dependency/models/schema.yml | 10 -
 .../local_dependency/seeds/seed.csv | 2 -
 .../test-files/custom_schema_tests1.sql | 19 -
 .../test-files/custom_schema_tests2.sql | 19 -
 .../test-files/customers.sql | 19 -
 .../test-files/customers1.md | 5 -
 .../test-files/customers2.md | 5 -
 .../test-files/empty_schema.yml | 0
 .../test-files/empty_schema_with_version.yml | 1 -
 .../test-files/env_var-sources.yml | 18 -
 .../test-files/env_var_macro.sql | 7 -
 .../test-files/env_var_macros.yml | 7 -
 .../test-files/env_var_metrics.yml | 30 -
 .../test-files/env_var_model.sql | 1 -
 .../test-files/env_var_model_one.sql | 1 -
 .../test-files/env_var_model_test.yml | 8 -
 .../test-files/env_var_schema.yml | 6 -
 .../test-files/env_var_schema2.yml | 11 -
 .../test-files/env_var_schema3.yml | 21 -
 .../test-files/generic_schema.yml | 9 -
 .../test-files/generic_test.sql | 26 -
 .../test-files/generic_test_edited.sql | 26 -
 .../test-files/generic_test_schema.yml | 10 -
 .../test-files/gsm_override.sql | 6 -
 .../test-files/gsm_override2.sql | 6 -
 .../test-files/macros-schema.yml | 8 -
 .../test-files/macros.yml | 4 -
 .../test-files/metric_model_a.sql | 21 -
 .../test-files/model_a.sql | 1 -
 .../test-files/model_b.sql | 1 -
 .../test-files/model_color.sql | 1 -
 .../test-files/model_four1.sql | 1 -
 .../test-files/model_four2.sql | 1 -
 .../test-files/model_one.sql | 1 -
 .../test-files/model_three.sql | 12 -
 .../test-files/model_three_disabled.sql | 12 -
 .../test-files/model_three_disabled2.sql | 13 -
 .../test-files/model_three_modified.sql | 14 -
 .../test-files/model_two.sql | 1 -
 .../test-files/models-schema1.yml | 5 -
 .../test-files/models-schema2.yml | 11 -
 .../test-files/models-schema2b.yml | 11 -
 .../test-files/models-schema3.yml | 12 -
 .../test-files/models-schema4.yml | 13 -
 .../test-files/models-schema4b.yml | 13 -
 .../test-files/my_analysis.sql | 1 -
 .../test-files/my_macro.sql | 7 -
 .../test-files/my_macro2.sql | 7 -
 .../test-files/my_metric.yml | 23 -
 .../test-files/my_test.sql | 2 -
 .../test-files/orders.sql | 1 -
 .../test-files/people.sql | 3 -
 .../test-files/people_metrics.yml | 30 -
 .../test-files/people_metrics2.yml | 30 -
 .../test-files/people_metrics3.yml | 17 -
 .../test-files/raw_customers.csv | 11 -
 .../test-files/ref_override.sql | 4 -
 .../test-files/ref_override2.sql | 4 -
 .../test-files/schema-models-c.yml | 14 -
 .../test-files/schema-sources1.yml | 17 -
 .../test-files/schema-sources2.yml | 29 -
 .../test-files/schema-sources3.yml | 28 -
 .../test-files/schema-sources4.yml | 30 -
 .../test-files/schema-sources5.yml | 29 -
 .../test-files/snapshot.sql | 29 -
 .../test-files/snapshot2.sql | 30 -
 .../test-files/sources-tests1.sql | 9 -
 .../test-files/sources-tests2.sql | 9 -
 .../test-files/test-macro.sql | 5 -
 .../test-files/test-macro2.sql | 5 -
 .../test-files/test_color.sql | 7 -
 .../test_partial_parsing.py | 578 -----
 .../test_pp_metrics.py | 106 -
 .../068_partial_parsing_tests/test_pp_vars.py | 416 ----
 .../models-circular-relationship/model_0.sql | 3 -
 .../models-circular-relationship/model_1.sql | 3 -
 .../models-circular-relationship/model_99.sql | 4 -
 .../models-circular-relationship/test.yml | 18 -
 .../models-failing/model_0.sql | 3 -
 .../models-failing/model_1.sql | 3 -
 .../models-failing/model_2.sql | 3 -
 .../models-failing/model_3.sql | 3 -
 .../models-failing/model_99.sql | 3 -
 .../069_build_tests/models-failing/test.yml | 15 -
 .../models-interdependent/model_a.sql | 1 -
 .../models-interdependent/model_c.sql | 1 -
 .../models-interdependent/schema.yml | 41 -
 .../models-simple-blocking/model_a.sql | 1 -
 .../models-simple-blocking/model_b.sql | 1 -
 .../models-simple-blocking/schema.yml | 8 -
 .../069_build_tests/models/model_0.sql | 3 -
 .../069_build_tests/models/model_1.sql | 3 -
 .../069_build_tests/models/model_2.sql | 3 -
 .../069_build_tests/models/model_99.sql | 3 -
 .../069_build_tests/models/test.yml | 15 -
 .../069_build_tests/seeds/countries.csv | 10 -
 .../069_build_tests/snapshots/snap_0.sql | 16 -
 .../069_build_tests/snapshots/snap_1.sql | 39 -
 .../069_build_tests/snapshots/snap_99.sql | 15 -
 .../069_build_tests/test-files/model_b.sql | 1 -
 .../test-files/model_b_null.sql | 1 -
 .../integration/069_build_tests/test_build.py | 143 --
 .../069_build_tests/tests-failing/model_0.sql | 3 -
 .../069_build_tests/tests-failing/model_1.sql | 3 -
 .../069_build_tests/tests-failing/model_2.sql | 3 -
 .../tests-failing/model_99.sql | 3 -
 .../069_build_tests/tests-failing/test.yml | 18 -
 test/integration/base.py | 2 +-
 test/unit/test_adapter_connection_manager.py | 22 +-
 test/unit/test_cache.py | 2 +-
 test/unit/test_config.py | 12 +-
 test/unit/test_context.py | 4 +-
 test/unit/test_core_dbt_utils.py | 4 +-
 test/unit/test_deps.py | 8 +-
 test/unit/test_exceptions.py | 6 +-
 test/unit/test_flags.py | 78 +-
 test/unit/test_graph.py | 7 +-
 test/unit/test_graph_selection.py | 2 +-
 test/unit/test_graph_selector_methods.py | 12 +-
 test/unit/test_graph_selector_spec.py | 6 +-
 test/unit/test_jinja.py | 34 +-
 test/unit/test_parser.py | 22 +-
 test/unit/test_postgres_adapter.py | 6 +-
 .../test_registry_get_request_exception.py | 4 +-
 test/unit/test_semver.py | 4 +-
 .../adapter/dbt/tests/adapter/__version__.py | 2 +-
 .../dbt/tests/adapter/aliases/test_aliases.py | 24 +-
 .../basic/test_table_materialization.py | 96 +
 .../dbt/tests/adapter/caching/test_caching.py | 103 +
 .../tests/adapter/column_types/fixtures.py | 41 +
 .../adapter/column_types/test_column_types.py | 24 +
 .../tests/adapter/dbt_debug/test_dbt_debug.py | 12 +-
 .../dbt/tests/adapter/incremental/fixtures.py | 305 +++
 .../test_incremental_on_schema_change.py | 104 +
 .../test_incremental_predicates.py | 26 +-
 .../query_comment/test_query_comment.py | 4 +-
 .../relations/test_changing_relation_type.py | 18 +-
 .../utils/fixture_escape_single_quotes.py | 33 +-
 tests/adapter/setup.py | 2 +-
 .../functional/artifacts/expected_manifest.py | 8 +-
 tests/functional/artifacts/test_override.py | 4 +-
 .../artifacts/test_previous_version_state.py | 4 +-
 .../basic/test_invalid_reference.py | 4 +-
 tests/functional/build/fixtures.py | 268 +++
 tests/functional/build/test_build.py | 198 ++
 tests/functional/colors/test_colors.py | 2 +-
 .../column_quoting/test_column_quotes.py | 100 +
 tests/functional/configs/test_configs.py | 17 +-
 .../configs/test_configs_in_schema_files.py | 6 +-
 .../functional/configs/test_disabled_model.py | 6 +-
 .../functional/configs/test_unused_configs.py | 4 +-
 .../context_methods/test_builtin_functions.py | 21 +-
 .../context_methods/test_cli_vars.py | 8 +-
 .../context_methods/test_custom_env_vars.py | 6 +-
 .../context_methods/test_secret_env_vars.py | 8 +-
 .../test_var_in_generate_name.py | 4 +-
 tests/functional/custom_aliases/fixtures.py | 68 +
 .../custom_aliases/test_custom_aliases.py | 49 +
 .../test_custom_singular_tests.py | 5 +
 tests/functional/cycles/test_cycles.py | 5 +-
 tests/functional/defer_state/fixtures.py | 101 +
 .../defer_state/test_defer_state.py | 273 +++
 .../defer_state/test_modified_state.py | 263 +++
 .../defer_state/test_run_results_state.py | 494 ++++
 .../dependencies/test_local_dependency.py | 6 +-
 .../deprecations/test_deprecations.py | 10 +-
 .../docs/test_duplicate_docs_block.py | 35 +
 .../functional/docs/test_good_docs_blocks.py | 171 ++
 tests/functional/docs/test_invalid_doc_ref.py | 47 +
 .../docs/test_missing_docs_blocks.py | 43 +
 .../duplicates/test_duplicate_analysis.py | 4 +-
 .../duplicates/test_duplicate_exposure.py | 4 +-
 .../duplicates/test_duplicate_macro.py | 6 +-
 .../duplicates/test_duplicate_metric.py | 4 +-
 .../duplicates/test_duplicate_model.py | 6 +-
 .../duplicates/test_duplicate_source.py | 4 +-
 tests/functional/exit_codes/fixtures.py | 2 +-
 .../functional/exit_codes/test_exit_codes.py | 52 +-
 tests/functional/exposures/fixtures.py | 1 -
 .../exposures/test_exposure_configs.py | 2 +-
 tests/functional/exposures/test_exposures.py | 8 +-
 .../test_external_reference.py | 59 +
 .../fail_fast/test_fail_fast_run.py | 6 +-
 tests/functional/hooks/test_model_hooks.py | 27 +-
 .../incremental_schema_tests/fixtures.py | 1 -
 .../test_incremental_schema.py | 76 +-
 tests/functional/init/test_init.py | 688 ++++++
 .../test_invalid_models.py | 14 +-
 tests/functional/logging/test_logging.py | 31 +-
 tests/functional/logging/test_meta_logging.py | 44 +
 tests/functional/macros/test_macros.py | 4 +-
 .../materializations/test_incremental.py | 6 +-
 tests/functional/metrics/fixtures.py | 55 +
 .../functional/metrics/test_metric_configs.py | 8 +-
 .../metrics/test_metric_helper_functions.py | 5 +-
 tests/functional/metrics/test_metrics.py | 93 +-
 tests/functional/minimal_cli/fixtures.py | 24 +-
 .../minimal_cli/test_minimal_cli.py | 47 +-
 tests/functional/partial_parsing/fixtures.py | 1126 +++++++++
 .../partial_parsing/test_partial_parsing.py | 643 ++++++
 .../partial_parsing/test_pp_metrics.py | 73 +
 .../partial_parsing/test_pp_vars.py | 386 ++++
 .../functional/persist_docs_tests/fixtures.py | 67 +
 .../persist_docs_tests/test_persist_docs.py | 194 ++
 .../postgres/test_postgres_indexes.py | 10 +-
 tests/functional/profiles/test_profile_dir.py | 26 +-
 .../ref_override/test_ref_override.py | 79 +
 .../relation_names/test_relation_name.py | 26 +-
 .../run_operations/test_run_operations.py | 58 +-
 .../functional/run_query/test_types.py | 17 +
 .../schema_tests/test_schema_v2_tests.py | 18 +-
 tests/functional/severity/test_severity.py | 122 +
 .../test_missing_strategy_snapshot.py | 4 +-
 .../test_source_overrides_duplicate_model.py | 4 +-
 .../functional/sources/test_simple_source.py | 4 +-
 .../sources/test_source_fresher_state.py | 8 +-
 .../sources/test_source_freshness.py | 2 +-
 .../functional/statements/test_statements.py | 10 +-
 .../test_store_test_failures.py | 81 +-
 tests/functional/test_selection/fixtures.py | 2 +-
 .../test_selection_expansion.py | 99 +-
 tests/functional/timezones/test_timezones.py | 67 +
 tests/unit/test_cli_flags.py | 35 +-
 tests/unit/test_connection_retries.py | 4 +-
 tests/unit/test_deprecations.py | 602 +++++
 tests/unit/test_events.py | 569 ++---
 tests/unit/test_functions.py | 45 +
 tests/unit/test_helper_types.py | 45 +
 tests/unit/test_proto_events.py | 144 +-
 458 files changed, 13611 insertions(+), 11105 deletions(-)
 create mode 100644 .changes/unreleased/Features-20230107-003157.yaml
 create mode 100644 .changes/unreleased/Features-20230118-233801.yaml
 create mode 100644 .changes/unreleased/Features-20230120-112921.yaml
 create mode 100644 .changes/unreleased/Fixes-20230116-123645.yaml
 create mode 100644 .changes/unreleased/Fixes-20230117-101342.yaml
 create mode 100644 .changes/unreleased/Fixes-20230124-115837.yaml
 create mode 100644 .changes/unreleased/Fixes-20230124-141943.yaml
 create mode 100644 .changes/unreleased/Fixes-20230125-191739.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230113-132513.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230113-150700.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230117-111737.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230120-172254.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230122-215235.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230124-153553.yaml
 create mode 100644 .changes/unreleased/Under the Hood-20230126-135939.yaml
 create mode 100644 .github/workflows/nightly-release.yml
 create mode 100644 core/dbt/internal_deprecations.py
 create mode 100644 scripts/env-setup.sh
 delete mode 100644 test/integration/018_adapter_ddl_tests/models/materialized.sql
 delete mode 100644 test/integration/018_adapter_ddl_tests/seed.sql
 delete mode 100644 test/integration/018_adapter_ddl_tests/test_adapter_ddl.py
 delete mode 100644 test/integration/022_timezones_tests/models/timezones.sql
 delete mode 100644 test/integration/022_timezones_tests/test_timezones.py
 delete mode 100644 test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql
 delete mode 100644 test/integration/033_event_tracking_tests/models/example.sql
 delete mode 100644 test/integration/033_event_tracking_tests/models/example_2.sql
 delete mode 100644 test/integration/033_event_tracking_tests/models/model_error.sql
 delete mode 100644 test/integration/033_event_tracking_tests/models/schema.yml
 delete mode 100644 test/integration/033_event_tracking_tests/models/snapshottable.sql
 delete mode 100644 test/integration/033_event_tracking_tests/seeds/example_seed.csv
 delete mode 100644 test/integration/033_event_tracking_tests/snapshots/a.sql
 delete mode 100644 test/integration/033_event_tracking_tests/test_events.py
 delete mode 100644 test/integration/035_docs_blocks_tests/test_docs_blocks.py
 delete mode 100644 test/integration/037_external_reference_tests/models/my_model.sql
 delete mode 100644 test/integration/037_external_reference_tests/standalone_models/my_model.sql
 delete mode 100644 test/integration/037_external_reference_tests/test_external_reference.py
 delete mode 100644 test/integration/038_caching_tests/test_caching.py
 delete mode 100644 test/integration/040_init_tests/test_init.py
 delete mode 100644 test/integration/043_custom_aliases_tests/macros-configs/macros.sql
 delete mode 100644 test/integration/043_custom_aliases_tests/macros/macros.sql
 delete mode 100644 test/integration/043_custom_aliases_tests/models/model1.sql
 delete mode 100644 test/integration/043_custom_aliases_tests/models/model2.sql
 delete mode 100644 test/integration/043_custom_aliases_tests/models/schema.yml
 delete mode 100644 test/integration/043_custom_aliases_tests/test_custom_aliases.py
 delete mode 100644 test/integration/045_test_severity_tests/models/model.sql
 delete mode 100644 test/integration/045_test_severity_tests/models/schema.yml
 delete mode 100644 test/integration/045_test_severity_tests/seeds/null_seed.csv
 delete mode 100644 test/integration/045_test_severity_tests/test_severity.py
 delete mode 100644 test/integration/045_test_severity_tests/tests/data.sql
 delete mode 100644 test/integration/052_column_quoting_tests/models-unquoted/model.sql
 delete mode 100644 test/integration/052_column_quoting_tests/models/model.sql
 delete mode 100644 test/integration/052_column_quoting_tests/seeds/seed.csv
 delete mode 100644 test/integration/052_column_quoting_tests/test_column_quotes.py
 delete mode 100644 test/integration/055_ref_override_tests/macros/ref_override_macro.sql
 delete mode 100644 test/integration/055_ref_override_tests/models/ref_override.sql
 delete mode 100644 test/integration/055_ref_override_tests/seeds/seed_1.csv
 delete mode 100644 test/integration/055_ref_override_tests/seeds/seed_2.csv
 delete mode 100644 test/integration/055_ref_override_tests/test_ref_override.py
 delete mode 100644 test/integration/056_column_type_tests/macros/test_alter_column_type.sql
 delete mode 100644 test/integration/056_column_type_tests/pg_models/model.sql
 delete mode 100644 test/integration/056_column_type_tests/pg_models/schema.yml
 delete mode 100644 test/integration/056_column_type_tests/test_alter_column_types.py
 delete mode 100644 test/integration/056_column_type_tests/test_column_types.py
 delete mode 100644 test/integration/057_run_query_tests/test_pg_types.py
 delete mode 100644 test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql
 delete mode 100644 test/integration/060_persist_docs_tests/models-column-missing/schema.yaml
 delete mode 100644 test/integration/060_persist_docs_tests/models/my_fun_docs.md
 delete mode 100644 test/integration/060_persist_docs_tests/models/no_docs_model.sql
 delete mode 100644 test/integration/060_persist_docs_tests/models/table_model.sql
 delete mode 100644 test/integration/060_persist_docs_tests/models/view_model.sql
 delete mode 100644 test/integration/060_persist_docs_tests/seeds/seed.csv
 delete mode 100644 test/integration/060_persist_docs_tests/test_persist_docs.py
 delete mode 100644 test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models/schema.yml
 delete mode 100644 test/integration/062_defer_state_tests/changed_models/table_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models/view_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/schema.yml
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/table_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/view_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_missing/schema.yml
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_missing/table_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/changed_models_missing/view_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/macros/infinite_macros.sql
 delete mode 100644 test/integration/062_defer_state_tests/macros/macros.sql
 delete mode 100644 test/integration/062_defer_state_tests/models/ephemeral_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/models/exposures.yml
 delete mode 100644 test/integration/062_defer_state_tests/models/schema.yml
 delete mode 100644 test/integration/062_defer_state_tests/models/table_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/models/view_model.sql
 delete mode 100644 test/integration/062_defer_state_tests/previous_state/manifest.json
 delete mode 100644
test/integration/062_defer_state_tests/seeds/seed.csv delete mode 100644 test/integration/062_defer_state_tests/snapshots/my_snapshot.sql delete mode 100644 test/integration/062_defer_state_tests/test_defer_state.py delete mode 100644 test/integration/062_defer_state_tests/test_modified_state.py delete mode 100644 test/integration/062_defer_state_tests/test_run_results_state.py delete mode 100644 test/integration/064_column_comments_tests/models/quote_model.sql delete mode 100644 test/integration/064_column_comments_tests/models/schema.yml delete mode 100644 test/integration/064_column_comments_tests/test_column_comments.py delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv delete mode 100644 test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/customers.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/customers1.md delete mode 100644 test/integration/068_partial_parsing_tests/test-files/customers2.md delete mode 100644 test/integration/068_partial_parsing_tests/test-files/empty_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_model.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_test.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/gsm_override.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/macros-schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/macros.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql delete mode 100644 
test/integration/068_partial_parsing_tests/test-files/model_a.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_b.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_color.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_four1.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_four2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_one.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_two.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema1.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema4.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_analysis.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_macro2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_metric.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_test.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/orders.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people_metrics.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/raw_customers.csv delete mode 100644 test/integration/068_partial_parsing_tests/test-files/ref_override.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/ref_override2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/snapshot.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/snapshot2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql delete mode 100644 
test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/test-macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/test-macro2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/test_color.sql delete mode 100644 test/integration/068_partial_parsing_tests/test_partial_parsing.py delete mode 100644 test/integration/068_partial_parsing_tests/test_pp_metrics.py delete mode 100644 test/integration/068_partial_parsing_tests/test_pp_vars.py delete mode 100644 test/integration/069_build_tests/models-circular-relationship/model_0.sql delete mode 100644 test/integration/069_build_tests/models-circular-relationship/model_1.sql delete mode 100644 test/integration/069_build_tests/models-circular-relationship/model_99.sql delete mode 100644 test/integration/069_build_tests/models-circular-relationship/test.yml delete mode 100644 test/integration/069_build_tests/models-failing/model_0.sql delete mode 100644 test/integration/069_build_tests/models-failing/model_1.sql delete mode 100644 test/integration/069_build_tests/models-failing/model_2.sql delete mode 100644 test/integration/069_build_tests/models-failing/model_3.sql delete mode 100644 test/integration/069_build_tests/models-failing/model_99.sql delete mode 100644 test/integration/069_build_tests/models-failing/test.yml delete mode 100644 test/integration/069_build_tests/models-interdependent/model_a.sql delete mode 100644 test/integration/069_build_tests/models-interdependent/model_c.sql delete mode 100644 test/integration/069_build_tests/models-interdependent/schema.yml delete mode 100644 test/integration/069_build_tests/models-simple-blocking/model_a.sql delete mode 100644 test/integration/069_build_tests/models-simple-blocking/model_b.sql delete mode 100644 test/integration/069_build_tests/models-simple-blocking/schema.yml delete mode 100644 test/integration/069_build_tests/models/model_0.sql delete mode 100644 test/integration/069_build_tests/models/model_1.sql delete mode 100644 test/integration/069_build_tests/models/model_2.sql delete mode 100644 test/integration/069_build_tests/models/model_99.sql delete mode 100644 test/integration/069_build_tests/models/test.yml delete mode 100644 test/integration/069_build_tests/seeds/countries.csv delete mode 100644 test/integration/069_build_tests/snapshots/snap_0.sql delete mode 100644 test/integration/069_build_tests/snapshots/snap_1.sql delete mode 100644 test/integration/069_build_tests/snapshots/snap_99.sql delete mode 100644 test/integration/069_build_tests/test-files/model_b.sql delete mode 100644 test/integration/069_build_tests/test-files/model_b_null.sql delete mode 100644 test/integration/069_build_tests/test_build.py delete mode 100644 test/integration/069_build_tests/tests-failing/model_0.sql delete mode 100644 test/integration/069_build_tests/tests-failing/model_1.sql delete mode 100644 test/integration/069_build_tests/tests-failing/model_2.sql delete mode 100644 test/integration/069_build_tests/tests-failing/model_99.sql delete mode 100644 test/integration/069_build_tests/tests-failing/test.yml create mode 100644 tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py create mode 100644 tests/adapter/dbt/tests/adapter/caching/test_caching.py rename test/integration/056_column_type_tests/macros/test_is_type.sql => tests/adapter/dbt/tests/adapter/column_types/fixtures.py (73%) create mode 100644 
tests/adapter/dbt/tests/adapter/column_types/test_column_types.py create mode 100644 tests/adapter/dbt/tests/adapter/incremental/fixtures.py create mode 100644 tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py create mode 100644 tests/functional/build/fixtures.py create mode 100644 tests/functional/build/test_build.py create mode 100644 tests/functional/column_quoting/test_column_quotes.py create mode 100644 tests/functional/custom_aliases/fixtures.py create mode 100644 tests/functional/custom_aliases/test_custom_aliases.py create mode 100644 tests/functional/defer_state/fixtures.py create mode 100644 tests/functional/defer_state/test_defer_state.py create mode 100644 tests/functional/defer_state/test_modified_state.py create mode 100644 tests/functional/defer_state/test_run_results_state.py create mode 100644 tests/functional/docs/test_duplicate_docs_block.py create mode 100644 tests/functional/docs/test_good_docs_blocks.py create mode 100644 tests/functional/docs/test_invalid_doc_ref.py create mode 100644 tests/functional/docs/test_missing_docs_blocks.py create mode 100644 tests/functional/external_reference/test_external_reference.py create mode 100644 tests/functional/init/test_init.py create mode 100644 tests/functional/logging/test_meta_logging.py create mode 100644 tests/functional/partial_parsing/fixtures.py create mode 100644 tests/functional/partial_parsing/test_partial_parsing.py create mode 100644 tests/functional/partial_parsing/test_pp_metrics.py create mode 100644 tests/functional/partial_parsing/test_pp_vars.py rename test/integration/060_persist_docs_tests/models/schema.yml => tests/functional/persist_docs_tests/fixtures.py (62%) create mode 100644 tests/functional/persist_docs_tests/test_persist_docs.py create mode 100644 tests/functional/ref_override/test_ref_override.py rename test/integration/057_run_query_tests/macros/test_pg_array_queries.sql => tests/functional/run_query/test_types.py (52%) create mode 100644 tests/functional/severity/test_severity.py create mode 100644 tests/functional/timezones/test_timezones.py create mode 100644 tests/unit/test_deprecations.py create mode 100644 tests/unit/test_functions.py create mode 100644 tests/unit/test_helper_types.py diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 3cdca1ad352..4db0c9a0c58 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,12 +1,14 @@ [bumpversion] -current_version = 1.4.0b1 +current_version = 1.5.0a1 parse = (?P\d+) \.(?P\d+) \.(?P\d+) ((?Pa|b|rc) (?P
    \d+)  # pre-release version num
+	)(\.(?P<nightly>[a-z..0-9]+)
     	)?
     serialize =
    +	{major}.{minor}.{patch}{prekind}{pre}.{nightly}
     	{major}.{minor}.{patch}{prekind}{pre}
     	{major}.{minor}.{patch}
     commit = False
    @@ -24,6 +26,8 @@ values =
     [bumpversion:part:pre]
     first_value = 1
     
    +[bumpversion:part:nightly]
    +
     [bumpversion:file:core/setup.py]
     
     [bumpversion:file:core/dbt/version.py]
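[Editorial note: a quick sanity check of the new `parse` pattern above, collapsed onto one line. This sketch is not part of the patch; bumpversion treats the config value as a multi-line verbose regex, and the group names here mirror the `serialize` templates. The version strings are illustrative.]

```python
import re

# Mirrors the .bumpversion.cfg `parse` value above, with the inline comment dropped.
PARSE = re.compile(
    r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
    r"((?P<prekind>a|b|rc)(?P<pre>\d+))"
    r"(\.(?P<nightly>[a-z..0-9]+))?"
)

m = PARSE.match("1.5.0a1")
assert m and m.group("prekind") == "a" and m.group("nightly") is None

m = PARSE.match("1.5.0a1.dev01262023")
assert m and m.group("nightly") == "dev01262023"
```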
    diff --git a/.changes/0.0.0.md b/.changes/0.0.0.md
    index 5359cd07bf2..f3a5e03d1a1 100644
    --- a/.changes/0.0.0.md
    +++ b/.changes/0.0.0.md
    @@ -3,6 +3,7 @@
     For information on prior major and minor releases, see their changelogs:
     
     
    +* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
     * [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
     * [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)
     * [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md)
    diff --git a/.changes/unreleased/Features-20230107-003157.yaml b/.changes/unreleased/Features-20230107-003157.yaml
    new file mode 100644
    index 00000000000..27858b516be
    --- /dev/null
    +++ b/.changes/unreleased/Features-20230107-003157.yaml
    @@ -0,0 +1,6 @@
    +kind: Features
    +body: Have dbt debug spit out structured json logs with flags enabled.
    +time: 2023-01-07T00:31:57.516063-08:00
    +custom:
    +  Author: versusfacit
    +  Issue: "5353"
    diff --git a/.changes/unreleased/Features-20230118-233801.yaml b/.changes/unreleased/Features-20230118-233801.yaml
    new file mode 100644
    index 00000000000..38affa143e8
    --- /dev/null
    +++ b/.changes/unreleased/Features-20230118-233801.yaml
    @@ -0,0 +1,6 @@
    +kind: Features
    +body: add adapter_response to dbt test and freshness result
    +time: 2023-01-18T23:38:01.857342+08:00
    +custom:
    +  Author: aezomz
    +  Issue: "2964"
    diff --git a/.changes/unreleased/Features-20230120-112921.yaml b/.changes/unreleased/Features-20230120-112921.yaml
    new file mode 100644
    index 00000000000..01532220a7f
    --- /dev/null
    +++ b/.changes/unreleased/Features-20230120-112921.yaml
    @@ -0,0 +1,6 @@
    +kind: Features
    +body: Improve error message for packages missing `dbt_project.yml`
    +time: 2023-01-20T11:29:21.509967-07:00
    +custom:
    +  Author: dbeatty10
    +  Issue: "6663"
    diff --git a/.changes/unreleased/Fixes-20230116-123645.yaml b/.changes/unreleased/Fixes-20230116-123645.yaml
    new file mode 100644
    index 00000000000..b3c35d8e2be
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230116-123645.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Respect quoting config for dbt.ref(), dbt.source(), and dbt.this() in dbt-py models
    +time: 2023-01-16T12:36:45.63092+01:00
    +custom:
    +  Author: jtcohen6
    +  Issue: 6103 6619
    diff --git a/.changes/unreleased/Fixes-20230117-101342.yaml b/.changes/unreleased/Fixes-20230117-101342.yaml
    new file mode 100644
    index 00000000000..9a879e60a89
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230117-101342.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Provide backward compatibility for `get_merge_sql` arguments
    +time: 2023-01-17T10:13:42.118336-06:00
    +custom:
    +  Author: dave-connors-3
    +  Issue: "6625"
    diff --git a/.changes/unreleased/Fixes-20230124-115837.yaml b/.changes/unreleased/Fixes-20230124-115837.yaml
    new file mode 100644
    index 00000000000..c74e83bbaf0
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230124-115837.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Include adapter_response in NodeFinished run_result log event
    +time: 2023-01-24T11:58:37.74179-05:00
    +custom:
    +  Author: gshank
    +  Issue: "6703"
    diff --git a/.changes/unreleased/Fixes-20230124-141943.yaml b/.changes/unreleased/Fixes-20230124-141943.yaml
    new file mode 100644
    index 00000000000..4b85413de58
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230124-141943.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Sort cli vars before hashing for partial parsing
    +time: 2023-01-24T14:19:43.333628-05:00
    +custom:
    +  Author: gshank
    +  Issue: "6710"
    diff --git a/.changes/unreleased/Fixes-20230125-191739.yaml b/.changes/unreleased/Fixes-20230125-191739.yaml
    new file mode 100644
    index 00000000000..fff39574ed9
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230125-191739.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: '[Regression] exposure_content referenced incorrectly'
    +time: 2023-01-25T19:17:39.942081-05:00
    +custom:
    +  Author: Mathyoub
    +  Issue: "6738"
    diff --git a/.changes/unreleased/Under the Hood-20230113-132513.yaml b/.changes/unreleased/Under the Hood-20230113-132513.yaml
    new file mode 100644
    index 00000000000..2274fbc01a7
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230113-132513.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: Fix use of ConnectionReused logging event
    +time: 2023-01-13T13:25:13.023168-05:00
    +custom:
    +  Author: gshank
    +  Issue: "6168"
    diff --git a/.changes/unreleased/Under the Hood-20230113-150700.yaml b/.changes/unreleased/Under the Hood-20230113-150700.yaml
    new file mode 100644
    index 00000000000..178603104e9
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230113-150700.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: Port docs tests to pytest
    +time: 2023-01-13T15:07:00.477038-05:00
    +custom:
    +  Author: peterallenwebb
    +  Issue: "6573"
    diff --git a/.changes/unreleased/Under the Hood-20230117-111737.yaml b/.changes/unreleased/Under the Hood-20230117-111737.yaml
    new file mode 100644
    index 00000000000..126a25ea28a
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230117-111737.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: Update deprecated github action command
    +time: 2023-01-17T11:17:37.046095-06:00
    +custom:
    +  Author: davidbloss
    +  Issue: "6153"
    diff --git a/.changes/unreleased/Under the Hood-20230120-172254.yaml b/.changes/unreleased/Under the Hood-20230120-172254.yaml
    new file mode 100644
    index 00000000000..3f65b39f99e
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230120-172254.yaml	
    @@ -0,0 +1,7 @@
    +kind: Under the Hood
    +body: Replaced the EmptyLine event with a more general Formatting event, and added
    +  a Note event.
    +time: 2023-01-20T17:22:54.45828-05:00
    +custom:
    +  Author: peterallenwebb
    +  Issue: "6481"
    diff --git a/.changes/unreleased/Under the Hood-20230122-215235.yaml b/.changes/unreleased/Under the Hood-20230122-215235.yaml
    new file mode 100644
    index 00000000000..760d8ea4838
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230122-215235.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: Small optimization on manifest parsing benefitting large DAGs
    +time: 2023-01-22T21:52:35.549814+01:00
    +custom:
    +  Author: boxysean
    +  Issue: "6697"
    diff --git a/.changes/unreleased/Under the Hood-20230124-153553.yaml b/.changes/unreleased/Under the Hood-20230124-153553.yaml
    new file mode 100644
    index 00000000000..0a540d6da55
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230124-153553.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: Revised and simplified various structured logging events
    +time: 2023-01-24T15:35:53.065356-05:00
    +custom:
    +  Author: peterallenwebb
    +  Issue: 6664 6665 6666
    diff --git a/.changes/unreleased/Under the Hood-20230126-135939.yaml b/.changes/unreleased/Under the Hood-20230126-135939.yaml
    new file mode 100644
    index 00000000000..091f0a65864
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230126-135939.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: ' Optimized GraphQueue to remove graph analysis bottleneck in large dags.'
    +time: 2023-01-26T13:59:39.518345-05:00
    +custom:
    +  Author: peterallenwebb
    +  Issue: "6759"
    diff --git a/.flake8 b/.flake8
    index 38b207c6e9b..e39b2fa4646 100644
    --- a/.flake8
    +++ b/.flake8
    @@ -9,4 +9,4 @@ ignore =
         E203 # makes Flake8 work like black
         E741
         E501 # long line checking is done in black
    -exclude = test
    +exclude = test/
    diff --git a/.github/_README.md b/.github/_README.md
    index 4da081fe2b6..f624fc5fec6 100644
    --- a/.github/_README.md
    +++ b/.github/_README.md
    @@ -63,12 +63,12 @@ permissions:
       contents: read
       pull-requests: write
     ```
    -    
    +
     ### Secrets
     - When to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) vs the [GITHUB_TOKEN](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) generated for the action?
     
         The `GITHUB_TOKEN` is used by default.  In most cases it is sufficient for what you need.
    -    
    +
    If you expect the workflow to result in a commit that should retrigger workflows, you will need to use a Personal Access Token for the bot to commit the file. When using the GITHUB_TOKEN, the resulting commit will not trigger another GitHub Actions Workflow run. This is due to limitations set by GitHub. See [the docs](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) for a more detailed explanation.
     
         For example, we must use a PAT in our workflow to commit a new changelog yaml file for bot PRs.  Once the file has been committed to the branch, it should retrigger the check to validate that a changelog exists on the PR.  Otherwise, it would stay in a failed state since the check would never retrigger.
    @@ -105,7 +105,7 @@ Some triggers of note that we use:
     
       ```
       # **what?**
    -  # Describe what the action does.  
    +  # Describe what the action does.
     
       # **why?**
       # Why does this action exist?
    @@ -138,7 +138,7 @@ Some triggers of note that we use:
             id: fp
             run: |
               FILEPATH=.changes/unreleased/Dependencies-${{ steps.filename_time.outputs.time }}.yaml
    -          echo "::set-output name=FILEPATH::$FILEPATH"
    +          echo "FILEPATH=$FILEPATH" >> $GITHUB_OUTPUT
       ```
     
     - Print out all variables you will reference as the first step of a job.  This allows for easier debugging.  The first job should log all inputs.  Subsequent jobs should reference outputs of other jobs, if present.
    @@ -158,14 +158,14 @@ Some triggers of note that we use:
             echo "The build_script_path:              ${{ inputs.build_script_path }}"
             echo "The s3_bucket_name:                 ${{ inputs.s3_bucket_name }}"
             echo "The package_test_command:           ${{ inputs.package_test_command }}"
    -      
    +
         # collect all the variables that need to be used in subsequent jobs
         - name: Set Variables
           id: variables
           run: |
    -        echo "::set-output name=important_path::'performance/runner/Cargo.toml'"
    -        echo "::set-output name=release_id::${{github.event.inputs.release_id}}"
    -        echo "::set-output name=open_prs::${{github.event.inputs.open_prs}}"
    +        echo "important_path='performance/runner/Cargo.toml'" >> $GITHUB_OUTPUT
    +        echo "release_id=${{github.event.inputs.release_id}}" >> $GITHUB_OUTPUT
    +        echo "open_prs=${{github.event.inputs.open_prs}}" >> $GITHUB_OUTPUT
     
       job2:
         needs: [job1]
    @@ -190,7 +190,7 @@ ___
     ### Actions from the Marketplace
     - Don’t use external actions for things that can easily be accomplished manually.
     - Always read through what an external action does before using it!  Often an action in the GitHub Actions Marketplace can be replaced with a few lines in bash.  This is much more maintainable (and won’t change under us) and clear as to what’s actually happening.  It also prevents any
    -- Pin actions _we don't control_ to tags. 
    +- Pin actions _we don't control_ to tags.
     
     ### Connecting to AWS
     - Authenticate with the aws managed workflow
    @@ -208,7 +208,7 @@ ___
     
       ```yaml
       - name: Copy Artifacts from S3 via CLI
    -    run: aws s3 cp ${{ env.s3_bucket }} . --recursive 
    +    run: aws s3 cp ${{ env.s3_bucket }} . --recursive
       ```
     
     ### Testing
    diff --git a/.github/actions/latest-wrangler/main.py b/.github/actions/latest-wrangler/main.py
    index 23e14cf5abe..db91cf8354b 100644
    --- a/.github/actions/latest-wrangler/main.py
    +++ b/.github/actions/latest-wrangler/main.py
    @@ -28,11 +28,12 @@
         if package_request.status_code == 404:
             if halt_on_missing:
                 sys.exit(1)
    -        else:
    -            # everything is the latest if the package doesn't exist
    -            print(f"::set-output name=latest::{True}")
    -            print(f"::set-output name=minor_latest::{True}")
    -            sys.exit(0)
    +        # everything is the latest if the package doesn't exist
    +        github_output = os.environ.get("GITHUB_OUTPUT")
    +        with open(github_output, "at", encoding="utf-8") as gh_output:
    +            gh_output.write("latest=True")
    +            gh_output.write("minor_latest=True")
    +        sys.exit(0)
     
         # TODO: verify package meta is "correct"
         # https://github.com/dbt-labs/dbt-core/issues/4640
    @@ -91,5 +92,7 @@ def is_latest(
         latest = is_latest(pre_rel, new_version, current_latest)
         minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
     
    -    print(f"::set-output name=latest::{latest}")
    -    print(f"::set-output name=minor_latest::{minor_latest}")
    +    github_output = os.environ.get("GITHUB_OUTPUT")
    +    with open(github_output, "at", encoding="utf-8") as gh_output:
    +        gh_output.write(f"latest={latest}")
    +        gh_output.write(f"minor_latest={minor_latest}")
    diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
    index 8138b730d34..7f4e8607185 100644
    --- a/.github/workflows/main.yml
    +++ b/.github/workflows/main.yml
    @@ -59,7 +59,9 @@ jobs:
               mypy --version
               python -m pip install -r requirements.txt
               python -m pip install -r dev-requirements.txt
    -          dbt --version
+          # Running "dbt --version" is temporarily disabled because version isn't implemented on this branch
    +          # Please un-comment it when GH #6757 / CT-1926 is complete
    +          #dbt --version
     
           - name: Run pre-commit hooks
             run: pre-commit run --all-files --show-diff-on-failure
    @@ -101,7 +103,9 @@ jobs:
           - name: Get current date
             if: always()
             id: date
    -        run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
    +        run: |
    +          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
    +          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
     
           - uses: actions/upload-artifact@v2
             if: always()
    @@ -168,7 +172,9 @@ jobs:
           - name: Get current date
             if: always()
             id: date
    -        run: echo "::set-output name=date::$(date +'%Y_%m_%dT%H_%M_%S')" #no colons allowed for artifacts
    +        run: |
    +          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
    +          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
     
           - uses: actions/upload-artifact@v2
             if: always()
    diff --git a/.github/workflows/nightly-release.yml b/.github/workflows/nightly-release.yml
    new file mode 100644
    index 00000000000..b668d62eccf
    --- /dev/null
    +++ b/.github/workflows/nightly-release.yml
    @@ -0,0 +1,109 @@
    +# **what?**
+# Nightly releases to GitHub and PyPI. This workflow produces the following outcomes:
+# - generate and validate data for the nightly release (commit SHA, version number, release branch);
+# - pass data to the release workflow;
+# - the nightly release will be pushed to GitHub as a draft release;
+# - the nightly build will be pushed to test PyPI;
    +#
    +# **why?**
    +# Ensure an automated and tested release process for nightly builds
    +#
    +# **when?**
    +# This workflow runs on schedule or can be run manually on demand.
    +
    +name: Nightly Test Release to GitHub and PyPI
    +
    +on:
    +  workflow_dispatch: # for manual triggering
    +  schedule:
    +    - cron: 0 9 * * *
    +
    +permissions:
    +  contents: write # this is the permission that allows creating a new release
    +
    +defaults:
    +  run:
    +    shell: bash
    +
    +env:
    +  RELEASE_BRANCH: "main"
    +
    +jobs:
    +  aggregate-release-data:
    +    runs-on: ubuntu-latest
    +
    +    outputs:
    +      commit_sha: ${{ steps.resolve-commit-sha.outputs.release_commit }}
    +      version_number: ${{ steps.nightly-release-version.outputs.number }}
    +      release_branch: ${{ steps.release-branch.outputs.name }}
    +
    +    steps:
    +      - name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
    +        uses: actions/checkout@v3
    +        with:
    +          ref: ${{ env.RELEASE_BRANCH }}
    +
    +      - name: "Resolve Commit To Release"
    +        id: resolve-commit-sha
    +        run: |
    +          commit_sha=$(git rev-parse HEAD)
    +          echo "release_commit=$commit_sha" >> $GITHUB_OUTPUT
    +
    +      - name: "Get Current Version Number"
    +        id: version-number-sources
    +        run: |
    +          current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
    +          echo "current_version=$current_version" >> $GITHUB_OUTPUT
    +
    +      - name: "Audit Version And Parse Into Parts"
    +        id: semver
    +        uses: dbt-labs/actions/parse-semver@v1.1.0
    +        with:
    +          version: ${{ steps.version-number-sources.outputs.current_version }}
    +
    +      - name: "Get Current Date"
    +        id: current-date
    +        run: echo "date=$(date +'%m%d%Y')" >> $GITHUB_OUTPUT
    +
    +      - name: "Generate Nightly Release Version Number"
    +        id: nightly-release-version
    +        run: |
    +          number="${{ steps.semver.outputs.version }}.dev${{ steps.current-date.outputs.date }}+nightly"
    +          echo "number=$number" >> $GITHUB_OUTPUT
    +
    +      - name: "Audit Nightly Release Version And Parse Into Parts"
    +        uses: dbt-labs/actions/parse-semver@v1.1.0
    +        with:
    +          version: ${{ steps.nightly-release-version.outputs.number }}
    +
    +      - name: "Set Release Branch"
    +        id: release-branch
    +        run: |
    +          echo "name=${{ env.RELEASE_BRANCH }}" >> $GITHUB_OUTPUT
    +
    +  log-outputs-aggregate-release-data:
    +    runs-on: ubuntu-latest
    +    needs: [aggregate-release-data]
    +
    +    steps:
    +      - name: "[DEBUG] Log Outputs"
    +        run: |
    +          echo commit_sha    : ${{ needs.aggregate-release-data.outputs.commit_sha }}
    +          echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
    +          echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
    +
    +  release-github-pypi:
    +    needs: [aggregate-release-data]
    +
    +    uses: ./.github/workflows/release.yml
    +    with:
    +      sha: ${{ needs.aggregate-release-data.outputs.commit_sha }}
+      target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
    +      version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
    +      build_script_path: "scripts/build-dist.sh"
    +      env_setup_script_path: "scripts/env-setup.sh"
    +      s3_bucket_name: "core-team-artifacts"
    +      package_test_command: "dbt --version"
    +      test_run: true
    +      nightly_release: true
    +    secrets: inherit
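[Editorial note: to make the data flow above concrete, the nightly version number assembled by the `nightly-release-version` step is the current semver plus a dated dev segment. A rough Python equivalent, assuming the same `%m%d%Y` date format as the "Get Current Date" step; names here are illustrative only.]

```python
from datetime import datetime

base_version = "1.5.0a1"  # from .bumpversion.cfg, parsed by the parse-semver action
build_date = datetime.now().strftime("%m%d%Y")  # matches `date +'%m%d%Y'` above
number = f"{base_version}.dev{build_date}+nightly"
print(number)  # e.g. 1.5.0a1.dev01262023+nightly
```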
    diff --git a/.github/workflows/release-branch-tests.yml b/.github/workflows/release-branch-tests.yml
    index 3b329f17b6c..bdd01aa495a 100644
    --- a/.github/workflows/release-branch-tests.yml
    +++ b/.github/workflows/release-branch-tests.yml
    @@ -39,7 +39,7 @@ jobs:
           max-parallel: 1
           fail-fast: false
           matrix:
    -        branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, main]
    +        branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, 1.4.latest, main]
     
         steps:
         - name: Call CI workflow for ${{ matrix.branch }} branch
    diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml
    index f47f110aeb1..f7b8dc29543 100644
    --- a/.github/workflows/release-docker.yml
    +++ b/.github/workflows/release-docker.yml
    @@ -41,9 +41,9 @@ jobs:
             id: version
             run: |
               IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
    -          echo "::set-output name=major::$MAJOR"
    -          echo "::set-output name=minor::$MINOR"
    -          echo "::set-output name=patch::$PATCH"
    +          echo "major=$MAJOR" >> $GITHUB_OUTPUT
    +          echo "minor=$MINOR" >> $GITHUB_OUTPUT
    +          echo "patch=$PATCH" >> $GITHUB_OUTPUT
     
           - name: Is pkg 'latest'
             id: latest
    @@ -70,8 +70,10 @@ jobs:
           - name: Get docker build arg
             id: build_arg
             run: |
    -          echo "::set-output name=build_arg_name::"$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
    -          echo "::set-output name=build_arg_value::"$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
    +          BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
    +          BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
    +          echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT
    +          echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT
     
           - name: Log in to the GHCR
             uses: docker/login-action@v1
    diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
    index 1abab3e5013..043f0a3d520 100644
    --- a/.github/workflows/release.yml
    +++ b/.github/workflows/release.yml
    @@ -1,24 +1,110 @@
     # **what?**
    -# Take the given commit, run unit tests specifically on that sha, build and
    -# package it, and then release to GitHub and PyPi with that specific build
    -
    +# Release workflow provides the following steps:
    +# - checkout the given commit;
    +# - validate version in sources and changelog file for given version;
    +# - bump the version and generate a changelog if needed;
    +# - merge all changes to the target branch if needed;
    +# - run unit and integration tests against given commit;
    +# - build and package that SHA;
    +# - release it to GitHub and PyPI with that specific build;
    +#
     # **why?**
     # Ensure an automated and tested release process
    -
    +#
     # **when?**
    -# This will only run manually with a given sha and version
    +# This workflow can be run manually on demand or can be called by other workflows
     
    -name: Release to GitHub and PyPi
    +name: Release to GitHub and PyPI
     
     on:
       workflow_dispatch:
         inputs:
           sha:
    -       description: 'The last commit sha in the release'
    -       required: true
    +        description: "The last commit sha in the release"
    +        type: string
    +        required: true
    +      target_branch:
    +        description: "The branch to release from"
    +        type: string
    +        required: true
    +      version_number:
    +        description: "The release version number (i.e. 1.0.0b1)"
    +        type: string
    +        required: true
    +      build_script_path:
    +        description: "Build script path"
    +        type: string
    +        default: "scripts/build-dist.sh"
    +        required: true
    +      env_setup_script_path:
    +        description: "Environment setup script path"
    +        type: string
    +        default: "scripts/env-setup.sh"
    +        required: false
    +      s3_bucket_name:
    +        description: "AWS S3 bucket name"
    +        type: string
    +        default: "core-team-artifacts"
    +        required: true
    +      package_test_command:
    +        description: "Package test command"
    +        type: string
    +        default: "dbt --version"
    +        required: true
    +      test_run:
    +        description: "Test run (Publish release as draft)"
    +        type: boolean
    +        default: true
    +        required: false
    +      nightly_release:
    +        description: "Nightly release to dev environment"
    +        type: boolean
    +        default: false
    +        required: false
    +  workflow_call:
    +    inputs:
    +      sha:
    +        description: "The last commit sha in the release"
    +        type: string
    +        required: true
    +      target_branch:
    +        description: "The branch to release from"
    +        type: string
    +        required: true
           version_number:
    -       description: 'The release version number (i.e. 1.0.0b1)'
    -       required: true
    +        description: "The release version number (i.e. 1.0.0b1)"
    +        type: string
    +        required: true
    +      build_script_path:
    +        description: "Build script path"
    +        type: string
    +        default: "scripts/build-dist.sh"
    +        required: true
    +      env_setup_script_path:
    +        description: "Environment setup script path"
    +        type: string
    +        default: "scripts/env-setup.sh"
    +        required: false
    +      s3_bucket_name:
    +        description: "AWS S3 bucket name"
    +        type: string
    +        default: "core-team-artifacts"
    +        required: true
    +      package_test_command:
    +        description: "Package test command"
    +        type: string
    +        default: "dbt --version"
    +        required: true
    +      test_run:
    +        description: "Test run (Publish release as draft)"
    +        type: boolean
    +        default: true
    +        required: false
    +      nightly_release:
    +        description: "Nightly release to dev environment"
    +        type: boolean
    +        default: false
    +        required: false
     
     permissions:
       contents: write # this is the permission that allows creating a new release
    @@ -28,175 +114,117 @@ defaults:
         shell: bash
     
     jobs:
    -  unit:
    -    name: Unit test
    -
    +  log-inputs:
    +    name: Log Inputs
         runs-on: ubuntu-latest
    -
    -    env:
    -      TOXENV: "unit"
    -
         steps:
    -      - name: Check out the repository
    -        uses: actions/checkout@v2
    -        with:
    -          persist-credentials: false
    -          ref: ${{ github.event.inputs.sha }}
    -
    -      - name: Set up Python
    -        uses: actions/setup-python@v2
    -        with:
    -          python-version: 3.8
    -
    -      - name: Install python dependencies
    +      - name: "[DEBUG] Print Variables"
             run: |
    -          pip install --user --upgrade pip
    -          pip install tox
    -          pip --version
    -          tox --version
    -
    -      - name: Run tox
    -        run: tox
    -
    -  build:
    -    name: build packages
    +          echo The last commit sha in the release: ${{ inputs.sha }}
    +          echo The branch to release from:         ${{ inputs.target_branch }}
    +          echo The release version number:         ${{ inputs.version_number }}
    +          echo Build script path:                  ${{ inputs.build_script_path }}
    +          echo Environment setup script path:      ${{ inputs.env_setup_script_path }}
    +          echo AWS S3 bucket name:                 ${{ inputs.s3_bucket_name }}
    +          echo Package test command:               ${{ inputs.package_test_command }}
    +          echo Test run:                           ${{ inputs.test_run }}
    +          echo Nightly release:                    ${{ inputs.nightly_release }}
    +
    +  bump-version-generate-changelog:
    +    name: Bump package version, Generate changelog
    +
    +    uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main
    +
    +    with:
    +      sha: ${{ inputs.sha }}
    +      version_number: ${{ inputs.version_number }}
    +      target_branch: ${{ inputs.target_branch }}
    +      env_setup_script_path: ${{ inputs.env_setup_script_path }}
    +      test_run: ${{ inputs.test_run }}
    +      nightly_release: ${{ inputs.nightly_release }}
    +
    +    secrets:
    +      FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}
    +
    +  log-outputs-bump-version-generate-changelog:
    +    name: "[Log output] Bump package version, Generate changelog"
    +    if: ${{ !failure() && !cancelled() }}
    +
    +    needs: [bump-version-generate-changelog]
     
         runs-on: ubuntu-latest
     
         steps:
    -      - name: Check out the repository
    -        uses: actions/checkout@v2
    -        with:
    -          persist-credentials: false
    -          ref: ${{ github.event.inputs.sha }}
    -
    -      - name: Set up Python
    -        uses: actions/setup-python@v2
    -        with:
    -          python-version: 3.8
    -
    -      - name: Install python dependencies
    +      - name: Print variables
             run: |
    -          pip install --user --upgrade pip
    -          pip install --upgrade setuptools wheel twine check-wheel-contents
    -          pip --version
    -
    -      - name: Build distributions
    -        run: ./scripts/build-dist.sh
    -
    -      - name: Show distributions
    -        run: ls -lh dist/
    -
    -      - name: Check distribution descriptions
    -        run: |
    -          twine check dist/*
    -
    -      - name: Check wheel contents
    -        run: |
    -          check-wheel-contents dist/*.whl --ignore W007,W008
    -
    -      - uses: actions/upload-artifact@v2
    -        with:
    -          name: dist
    -          path: |
    -            dist/
    -            !dist/dbt-${{github.event.inputs.version_number}}.tar.gz
    -
    -  test-build:
    -    name: verify packages
    -
    -    needs: [build, unit]
    -
    -    runs-on: ubuntu-latest
    +          echo Final SHA     : ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
    +          echo Changelog path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
    +
    +  build-test-package:
    +    name: Build, Test, Package
    +    if: ${{ !failure() && !cancelled() }}
    +    needs: [bump-version-generate-changelog]
    +
    +    uses: dbt-labs/dbt-release/.github/workflows/build.yml@main
    +
    +    with:
    +      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
    +      version_number: ${{ inputs.version_number }}
    +      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
    +      build_script_path: ${{ inputs.build_script_path }}
    +      s3_bucket_name: ${{ inputs.s3_bucket_name }}
    +      package_test_command: ${{ inputs.package_test_command }}
    +      test_run: ${{ inputs.test_run }}
    +      nightly_release: ${{ inputs.nightly_release }}
    +
    +    secrets:
    +      AWS_ACCESS_KEY_ID: ${{ secrets.PRODUCTION_AWS_ACCESS_KEY_ID }}
    +      AWS_SECRET_ACCESS_KEY: ${{ secrets.PRODUCTION_AWS_SECRET_ACCESS_KEY }}
     
    -    steps:
    -      - name: Set up Python
    -        uses: actions/setup-python@v2
    -        with:
    -          python-version: 3.8
    -
    -      - name: Install python dependencies
    -        run: |
    -          pip install --user --upgrade pip
    -          pip install --upgrade wheel
    -          pip --version
    -
    -      - uses: actions/download-artifact@v2
    -        with:
    -          name: dist
    -          path: dist/
    -
    -      - name: Show distributions
    -        run: ls -lh dist/
    +  github-release:
    +    name: GitHub Release
    +    if: ${{ !failure() && !cancelled() }}
     
    -      - name: Install wheel distributions
    -        run: |
    -          find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
    +    needs: [bump-version-generate-changelog, build-test-package]
     
    -      - name: Check wheel distributions
    -        run: |
    -          dbt --version
    +    uses: dbt-labs/dbt-release/.github/workflows/github-release.yml@main
     
    -      - name: Install source distributions
    -        run: |
    -          find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
    +    with:
    +      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
    +      version_number: ${{ inputs.version_number }}
    +      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
    +      test_run: ${{ inputs.test_run }}
     
    -      - name: Check source distributions
    -        run: |
    -          dbt --version
    +  pypi-release:
    +    name: PyPI Release
     
    -  github-release:
    -    name: GitHub Release
    +    needs: [github-release]
     
    -    needs: test-build
    +    uses: dbt-labs/dbt-release/.github/workflows/pypi-release.yml@main
     
    -    runs-on: ubuntu-latest
    +    with:
    +      version_number: ${{ inputs.version_number }}
    +      test_run: ${{ inputs.test_run }}
     
    -    steps:
    -      - uses: actions/download-artifact@v2
    -        with:
    -          name: dist
    -          path: '.'
    -
    -      # Need to set an output variable because env variables can't be taken as input
    -      # This is needed for the next step with releasing to GitHub
    -      - name: Find release type
    -        id: release_type
    -        env:
    -          IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') ||  contains(github.event.inputs.version_number, 'b') }}
    -        run: |
    -          echo ::set-output name=isPrerelease::$IS_PRERELEASE
    -
    -      - name: Creating GitHub Release
    -        uses: softprops/action-gh-release@v1
    -        with:
    -          name: dbt-core v${{github.event.inputs.version_number}}
    -          tag_name: v${{github.event.inputs.version_number}}
    -          prerelease: ${{ steps.release_type.outputs.isPrerelease }}
    -          target_commitish: ${{github.event.inputs.sha}}
    -          body: |
    -            [Release notes](https://github.com/dbt-labs/dbt-core/blob/main/CHANGELOG.md)
    -          files: |
    -            dbt_postgres-${{github.event.inputs.version_number}}-py3-none-any.whl
    -            dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
    -            dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
    -            dbt-core-${{github.event.inputs.version_number}}.tar.gz
    +    secrets:
    +      PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
    +      TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
     
    -  pypi-release:
    -    name: Pypi release
    +  slack-notification:
    +    name: Slack Notification
    +    if: ${{ failure() }}
     
    -    runs-on: ubuntu-latest
    +    needs:
    +      [
    +        bump-version-generate-changelog,
    +        build-test-package,
    +        github-release,
    +        pypi-release,
    +      ]
     
    -    needs: github-release
    +    uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
    +    with:
    +      status: "failure"
     
    -    environment: PypiProd
    -    steps:
    -      - uses: actions/download-artifact@v2
    -        with:
    -          name: dist
    -          path: 'dist'
    -
    -      - name: Publish distribution to PyPI
    -        uses: pypa/gh-action-pypi-publish@v1.4.2
    -        with:
    -          password: ${{ secrets.PYPI_API_TOKEN }}
    +    secrets:
    +      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
    diff --git a/.github/workflows/version-bump.yml b/.github/workflows/version-bump.yml
    index 1a5be6aefb1..2bbaf1cef82 100644
    --- a/.github/workflows/version-bump.yml
    +++ b/.github/workflows/version-bump.yml
    @@ -65,7 +65,7 @@ jobs:
           - name: Set branch value
             id: variables
             run: |
    -          echo "::set-output name=BRANCH_NAME::prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID"
    +          echo "BRANCH_NAME=prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
     
           - name: Create PR branch
             run: |
    diff --git a/CHANGELOG.md b/CHANGELOG.md
    index 4a91696f68b..45347e50b1f 100755
    --- a/CHANGELOG.md
    +++ b/CHANGELOG.md
    @@ -5,102 +5,12 @@
     - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
     - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
     
    -## dbt-core 1.4.0-b1 - December 15, 2022
    -
    -### Features
    -
    -- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
    -- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610))
    -- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486))
    -- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521))
    -- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929))
    -- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990))
    -- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201))
    -- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205))
    -- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246))
    -- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057))
    -- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147))
    -- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680))
    -
    -### Fixes
    -
    -- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992))
    -- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030))
    -- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041))
    -- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436))
    -- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625))
    -- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245))
    -- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309))
    -- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055))
    -- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345))
    -- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434))
    -
    -### Docs
    -
    -- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791))
    -- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528))
    --  ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880))
    -- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323))
    -- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368))
    -
    -### Under the Hood
    -
    -- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946))
    -- Added flat_graph attribute the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809))
    -- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983))
    -- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023))
    -- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028))
    -- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229))
    -- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068))
    -- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350))
    -- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171))
    -- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173))
    -- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942))
    -- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770))
    -- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771))
    -- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942))
    -- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. ([#6139](https://github.com/dbt-labs/dbt-core/issues/6139))
    -- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383))
    -- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386))
    -- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394))
    -- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426))
    -- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437))
    -
    -### Dependencies
    -
    -- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917))
    -- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019))
    -- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108))
    -- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144))
    -- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904))
    -
    -### Contributors
    -- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791))
    -- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041))
    -- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
    -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990))
    -- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394))
    -- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309))
    -- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068))
    -- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246))
    -- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486))
    -- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968))
    -- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147))
    -- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245))
    -- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350))
    -- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028))
    -- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880))
    -- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929))
    -- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201))
    -- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205))
    -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680))
    -
    -
     ## Previous Releases
     
     For information on prior major and minor releases, see their changelogs:
     
     
    +* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
     * [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
     * [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)
     * [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md)
    diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
    index 3bbd8d14d5f..893979fd9ac 100644
    --- a/CONTRIBUTING.md
    +++ b/CONTRIBUTING.md
    @@ -96,12 +96,15 @@ brew install postgresql
     
     ### Installation
     
    -First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment).  Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies) with:
    +First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment).  Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies):
     
     ```sh
     make dev
    -# or
    +```
    +or, alternatively:
    +```sh
     pip install -r dev-requirements.txt -r editable-requirements.txt
    +pre-commit install
     ```
     
     When installed in this way, any changes you make to your local copy of the source code will be reflected immediately in your next `dbt` run.
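
A quick way to confirm the editable install is active is to check, from Python, that the imported package resolves to your local checkout rather than a copy in site-packages (a minimal sketch; the path shown is illustrative):

    # The printed path should point into your dbt-core checkout,
    # e.g. .../dbt-core/core/dbt/__init__.py
    import dbt
    print(dbt.__file__)
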
    diff --git a/Makefile b/Makefile
    index 90510ea3855..566c4de9e4d 100644
    --- a/Makefile
    +++ b/Makefile
    @@ -19,11 +19,16 @@ CI_FLAGS =\
     	LOG_DIR=./logs\
     	DBT_LOG_FORMAT=json
     
    -.PHONY: dev
    -dev: ## Installs dbt-* packages in develop mode along with development dependencies.
    +.PHONY: dev_req
+dev_req: ## Installs dbt-* packages in develop mode along with development dependencies only.
     	@\
     	pip install -r dev-requirements.txt -r editable-requirements.txt
     
    +.PHONY: dev
    +dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
    +	@\
    +	pre-commit install
    +
     .PHONY: mypy
     mypy: .env ## Runs mypy against staged changes for static type checking.
     	@\
    diff --git a/core/dbt/adapters/base/column.py b/core/dbt/adapters/base/column.py
    index b47aac64062..3c6246b33a6 100644
    --- a/core/dbt/adapters/base/column.py
    +++ b/core/dbt/adapters/base/column.py
    @@ -2,7 +2,7 @@
     import re
     from typing import Dict, ClassVar, Any, Optional
     
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     
     
     @dataclass
    @@ -85,7 +85,7 @@ def is_numeric(self) -> bool:
     
         def string_size(self) -> int:
             if not self.is_string():
    -            raise RuntimeException("Called string_size() on non-string field!")
    +            raise DbtRuntimeError("Called string_size() on non-string field!")
     
             if self.dtype == "text" or self.char_size is None:
                 # char_size should never be None. Handle it reasonably just in case
    @@ -124,7 +124,7 @@ def __repr__(self) -> str:
         def from_description(cls, name: str, raw_data_type: str) -> "Column":
             match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
             if match is None:
    -            raise RuntimeException(f'Could not interpret data type "{raw_data_type}"')
    +            raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"')
             data_type, size_info = match.groups()
             char_size = None
             numeric_precision = None
    @@ -137,7 +137,7 @@ def from_description(cls, name: str, raw_data_type: str) -> "Column":
                     try:
                         char_size = int(parts[0])
                     except ValueError:
    -                    raise RuntimeException(
    +                    raise DbtRuntimeError(
                             f'Could not interpret data_type "{raw_data_type}": '
                             f'could not convert "{parts[0]}" to an integer'
                         )
    @@ -145,14 +145,14 @@ def from_description(cls, name: str, raw_data_type: str) -> "Column":
                     try:
                         numeric_precision = int(parts[0])
                     except ValueError:
    -                    raise RuntimeException(
    +                    raise DbtRuntimeError(
                             f'Could not interpret data_type "{raw_data_type}": '
                             f'could not convert "{parts[0]}" to an integer'
                         )
                     try:
                         numeric_scale = int(parts[1])
                     except ValueError:
    -                    raise RuntimeException(
    +                    raise DbtRuntimeError(
                             f'Could not interpret data_type "{raw_data_type}": '
                             f'could not convert "{parts[1]}" to an integer'
                         )
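
The renames above (RuntimeException becomes DbtRuntimeError, with the rest of dbt.exceptions renamed in the same pattern throughout this patch) will break adapter plugins that import the old names. A minimal compatibility shim such a plugin might use, sketched using only the names visible in this diff:

    # Import the new exception names, falling back to the
    # pre-rename names on older dbt-core versions.
    try:
        from dbt.exceptions import DbtRuntimeError, DbtInternalError
    except ImportError:
        from dbt.exceptions import (
            RuntimeException as DbtRuntimeError,
            InternalException as DbtInternalError,
        )
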
    diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py
    index 577cdf6d9a6..d449b27e5e6 100644
    --- a/core/dbt/adapters/base/connections.py
    +++ b/core/dbt/adapters/base/connections.py
    @@ -91,13 +91,13 @@ def get_thread_connection(self) -> Connection:
             key = self.get_thread_identifier()
             with self.lock:
                 if key not in self.thread_connections:
    -                raise dbt.exceptions.InvalidConnectionException(key, list(self.thread_connections))
    +                raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections))
                 return self.thread_connections[key]
     
         def set_thread_connection(self, conn: Connection) -> None:
             key = self.get_thread_identifier()
             if key in self.thread_connections:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     "In set_thread_connection, existing connection exists for {}"
                 )
             self.thread_connections[key] = conn
    @@ -137,49 +137,49 @@ def exception_handler(self, sql: str) -> ContextManager:
             :return: A context manager that handles exceptions raised by the
                 underlying database.
             """
    -        raise dbt.exceptions.NotImplementedException(
    +        raise dbt.exceptions.NotImplementedError(
                 "`exception_handler` is not implemented for this adapter!"
             )
     
         def set_connection_name(self, name: Optional[str] = None) -> Connection:
    -        conn_name: str
    -        if name is None:
    -            # if a name isn't specified, we'll re-use a single handle
    -            # named 'master'
    -            conn_name = "master"
    -        else:
    -            if not isinstance(name, str):
    -                raise dbt.exceptions.CompilerException(
    -                    f"For connection name, got {name} - not a string!"
    -                )
    -            assert isinstance(name, str)
    -            conn_name = name
    +        """Called by 'acquire_connection' in BaseAdapter, which is called by
    +        'connection_named', called by 'connection_for(node)'.
    +        Creates a connection for this thread if one doesn't already
    +        exist, and will rename an existing connection."""
    +
    +        conn_name: str = "master" if name is None else name
     
    +        # Get a connection for this thread
             conn = self.get_if_exists()
    +
    +        if conn and conn.name == conn_name and conn.state == "open":
    +            # Found a connection and nothing to do, so just return it
    +            return conn
    +
             if conn is None:
    +            # Create a new connection
                 conn = Connection(
                     type=Identifier(self.TYPE),
    -                name=None,
    +                name=conn_name,
                     state=ConnectionState.INIT,
                     transaction_open=False,
                     handle=None,
                     credentials=self.profile.credentials,
                 )
    -            self.set_thread_connection(conn)
    -
    -        if conn.name == conn_name and conn.state == "open":
    -            return conn
    -
    -        fire_event(
    -            NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
    -        )
    -
    -        if conn.state == "open":
    -            fire_event(ConnectionReused(conn_name=conn_name))
    -        else:
                 conn.handle = LazyHandle(self.open)
    +            # Add the connection to thread_connections for this thread
    +            self.set_thread_connection(conn)
    +            fire_event(
    +                NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
    +            )
    +        else:  # existing connection either wasn't open or didn't have the right name
    +            if conn.state != "open":
    +                conn.handle = LazyHandle(self.open)
    +            if conn.name != conn_name:
    +                orig_conn_name: str = conn.name or ""
    +                conn.name = conn_name
    +                fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name))
     
    -        conn.name = conn_name
             return conn
     
         @classmethod
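
For orientation, a sketch of how the rewritten set_connection_name above is typically reached, via the 'connection_named' path its docstring mentions; the adapter variable and connection name here are illustrative:

    # Each thread reuses, renames, or lazily creates its own connection.
    with adapter.connection_named("list_relations"):
        conn = adapter.connections.get_thread_connection()
        # conn.handle stays a LazyHandle until the first query opens it
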
    @@ -211,7 +211,7 @@ def retry_connection(
                 connect should trigger a retry.
             :type retryable_exceptions: Iterable[Type[Exception]]
             :param int retry_limit: How many times to retry the call to connect. If this limit
    -            is exceeded before a successful call, a FailedToConnectException will be raised.
    +            is exceeded before a successful call, a FailedToConnectError will be raised.
                 Must be non-negative.
             :param retry_timeout: Time to wait between attempts to connect. Can also take a
                 Callable that takes the number of attempts so far, beginning at 0, and returns an int
    @@ -220,14 +220,14 @@ def retry_connection(
             :param int _attempts: Parameter used to keep track of the number of attempts in calling the
                 connect function across recursive calls. Passed as an argument to retry_timeout if it
                 is a Callable. This parameter should not be set by the initial caller.
    -        :raises dbt.exceptions.FailedToConnectException: Upon exhausting all retry attempts without
    +        :raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without
                 successfully acquiring a handle.
             :return: The given connection with its appropriate state and handle attributes set
                 depending on whether we successfully acquired a handle or not.
             """
             timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
             if timeout < 0:
    -            raise dbt.exceptions.FailedToConnectException(
    +            raise dbt.exceptions.FailedToConnectError(
                     "retry_timeout cannot be negative or return a negative time."
                 )
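
Because retry_timeout accepts either a number or a callable of the attempt count, exponential backoff is a one-liner. A hedged sketch of a call using only the parameters documented above; the connect callable and the retryable exception are stand-ins, and undocumented parameters may also apply:

    # Retry up to 5 times, waiting 1s, 2s, 4s, ... between attempts.
    connection = cls.retry_connection(
        connection,
        connect=connect,  # callable that returns an open handle
        retry_limit=5,
        retry_timeout=lambda attempt: 2**attempt,  # attempt starts at 0
        retryable_exceptions=[SomeOperationalError],  # adapter-specific
    )
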
     
    @@ -235,7 +235,7 @@ def retry_connection(
             # This guard is not perfect; others may add to the recursion limit (e.g. built-ins).
                 connection.handle = None
                 connection.state = ConnectionState.FAIL
    -            raise dbt.exceptions.FailedToConnectException("retry_limit cannot be negative")
    +            raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative")
     
             try:
                 connection.handle = connect()
    @@ -246,7 +246,7 @@ def retry_connection(
                 if retry_limit <= 0:
                     connection.handle = None
                     connection.state = ConnectionState.FAIL
    -                raise dbt.exceptions.FailedToConnectException(str(e))
    +                raise dbt.exceptions.FailedToConnectError(str(e))
     
                 logger.debug(
                     f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n"
    @@ -268,12 +268,12 @@ def retry_connection(
             except Exception as e:
                 connection.handle = None
                 connection.state = ConnectionState.FAIL
    -            raise dbt.exceptions.FailedToConnectException(str(e))
    +            raise dbt.exceptions.FailedToConnectError(str(e))
     
         @abc.abstractmethod
         def cancel_open(self) -> Optional[List[str]]:
             """Cancel all open connections on the adapter. (passable)"""
    -        raise dbt.exceptions.NotImplementedException(
    +        raise dbt.exceptions.NotImplementedError(
                 "`cancel_open` is not implemented for this adapter!"
             )
     
    @@ -288,7 +288,7 @@ def open(cls, connection: Connection) -> Connection:
             This should be thread-safe, or hold the lock if necessary. The given
             connection should not be in either in_use or available.
             """
    -        raise dbt.exceptions.NotImplementedException("`open` is not implemented for this adapter!")
    +        raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!")
     
         def release(self) -> None:
             with self.lock:
    @@ -320,16 +320,12 @@ def cleanup_all(self) -> None:
         @abc.abstractmethod
         def begin(self) -> None:
             """Begin a transaction. (passable)"""
    -        raise dbt.exceptions.NotImplementedException(
    -            "`begin` is not implemented for this adapter!"
    -        )
    +        raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!")
     
         @abc.abstractmethod
         def commit(self) -> None:
             """Commit a transaction. (passable)"""
    -        raise dbt.exceptions.NotImplementedException(
    -            "`commit` is not implemented for this adapter!"
    -        )
    +        raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!")
     
         @classmethod
         def _rollback_handle(cls, connection: Connection) -> None:
    @@ -365,7 +361,7 @@ def _close_handle(cls, connection: Connection) -> None:
         def _rollback(cls, connection: Connection) -> None:
             """Roll back the given connection."""
             if connection.transaction_open is False:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Tried to rollback transaction on connection "
                     f'"{connection.name}", but it does not have one open!'
                 )
    @@ -415,6 +411,4 @@ def execute(
             :return: A tuple of the query status and results (empty if fetch=False).
             :rtype: Tuple[AdapterResponse, agate.Table]
             """
    -        raise dbt.exceptions.NotImplementedException(
    -            "`execute` is not implemented for this adapter!"
    -        )
    +        raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!")
    diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py
    index 64ebbeac5dd..8234f90910c 100644
    --- a/core/dbt/adapters/base/impl.py
    +++ b/core/dbt/adapters/base/impl.py
    @@ -17,25 +17,24 @@
         Iterator,
         Set,
     )
    -
     import agate
     import pytz
     
     from dbt.exceptions import (
    -    InternalException,
    -    InvalidMacroArgType,
    -    InvalidMacroResult,
    -    InvalidQuoteConfigType,
    -    NotImplementedException,
    -    NullRelationCacheAttempted,
    -    NullRelationDropAttempted,
    -    RelationReturnedMultipleResults,
    -    RenameToNoneAttempted,
    -    RuntimeException,
    -    SnapshotTargetIncomplete,
    -    SnapshotTargetNotSnapshotTable,
    -    UnexpectedNull,
    -    UnexpectedNonTimestamp,
    +    DbtInternalError,
    +    MacroArgTypeError,
    +    MacroResultError,
    +    QuoteConfigTypeError,
    +    NotImplementedError,
    +    NullRelationCacheAttemptedError,
    +    NullRelationDropAttemptedError,
    +    RelationReturnedMultipleResultsError,
    +    RenameToNoneAttemptedError,
    +    DbtRuntimeError,
    +    SnapshotTargetIncompleteError,
    +    SnapshotTargetNotSnapshotTableError,
    +    UnexpectedNullError,
    +    UnexpectedNonTimestampError,
     )
     
     from dbt.adapters.protocol import (
    @@ -54,7 +53,7 @@
         CodeExecutionStatus,
         CatalogGenerationError,
     )
    -from dbt.utils import filter_null_values, executor, cast_to_str
    +from dbt.utils import filter_null_values, executor, cast_to_str, AttrDict
     
     from dbt.adapters.base.connections import Connection, AdapterResponse
     from dbt.adapters.base.meta import AdapterMeta, available
    @@ -75,7 +74,7 @@
     
     def _expect_row_value(key: str, row: agate.Row):
         if key not in row.keys():
    -        raise InternalException(
    +        raise DbtInternalError(
                 'Got a row without "{}" column, columns: {}'.format(key, row.keys())
             )
         return row[key]
    @@ -104,10 +103,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet
         assume the datetime is already for UTC and add the timezone.
         """
         if dt is None:
    -        raise UnexpectedNull(field_name, source)
    +        raise UnexpectedNullError(field_name, source)
     
         elif not hasattr(dt, "tzinfo"):
    -        raise UnexpectedNonTimestamp(field_name, source, dt)
    +        raise UnexpectedNonTimestampError(field_name, source, dt)
     
         elif dt.tzinfo:
             return dt.astimezone(pytz.UTC)
    @@ -433,7 +432,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str:
             """Cache a new relation in dbt. It will show up in `list relations`."""
             if relation is None:
                 name = self.nice_connection_name()
    -            raise NullRelationCacheAttempted(name)
    +            raise NullRelationCacheAttemptedError(name)
             self.cache.add(relation)
             # so jinja doesn't render things
             return ""
    @@ -445,7 +444,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str:
             """
             if relation is None:
                 name = self.nice_connection_name()
    -            raise NullRelationDropAttempted(name)
    +            raise NullRelationDropAttemptedError(name)
             self.cache.drop(relation)
             return ""
     
    @@ -462,7 +461,7 @@ def cache_renamed(
                 name = self.nice_connection_name()
                 src_name = _relation_name(from_relation)
                 dst_name = _relation_name(to_relation)
    -            raise RenameToNoneAttempted(src_name, dst_name, name)
    +            raise RenameToNoneAttemptedError(src_name, dst_name, name)
     
             self.cache.rename(from_relation, to_relation)
             return ""
    @@ -474,12 +473,12 @@ def cache_renamed(
         @abc.abstractmethod
         def date_function(cls) -> str:
             """Get the date function used by this adapter's database."""
    -        raise NotImplementedException("`date_function` is not implemented for this adapter!")
    +        raise NotImplementedError("`date_function` is not implemented for this adapter!")
     
         @classmethod
         @abc.abstractmethod
         def is_cancelable(cls) -> bool:
    -        raise NotImplementedException("`is_cancelable` is not implemented for this adapter!")
    +        raise NotImplementedError("`is_cancelable` is not implemented for this adapter!")
     
         ###
         # Abstract methods about schemas
    @@ -487,7 +486,7 @@ def is_cancelable(cls) -> bool:
         @abc.abstractmethod
         def list_schemas(self, database: str) -> List[str]:
             """Get a list of existing schemas in database"""
    -        raise NotImplementedException("`list_schemas` is not implemented for this adapter!")
    +        raise NotImplementedError("`list_schemas` is not implemented for this adapter!")
     
         @available.parse(lambda *a, **k: False)
         def check_schema_exists(self, database: str, schema: str) -> bool:
    @@ -510,13 +509,13 @@ def drop_relation(self, relation: BaseRelation) -> None:
     
             *Implementors must call self.cache.drop() to preserve cache state!*
             """
    -        raise NotImplementedException("`drop_relation` is not implemented for this adapter!")
    +        raise NotImplementedError("`drop_relation` is not implemented for this adapter!")
     
         @abc.abstractmethod
         @available.parse_none
         def truncate_relation(self, relation: BaseRelation) -> None:
             """Truncate the given relation."""
    -        raise NotImplementedException("`truncate_relation` is not implemented for this adapter!")
    +        raise NotImplementedError("`truncate_relation` is not implemented for this adapter!")
     
         @abc.abstractmethod
         @available.parse_none
    @@ -525,15 +524,13 @@ def rename_relation(self, from_relation: BaseRelation, to_relation: BaseRelation
     
             Implementors must call self.cache.rename() to preserve cache state.
             """
    -        raise NotImplementedException("`rename_relation` is not implemented for this adapter!")
    +        raise NotImplementedError("`rename_relation` is not implemented for this adapter!")
     
         @abc.abstractmethod
         @available.parse_list
         def get_columns_in_relation(self, relation: BaseRelation) -> List[BaseColumn]:
             """Get a list of the columns in the given Relation."""
    -        raise NotImplementedException(
    -            "`get_columns_in_relation` is not implemented for this adapter!"
    -        )
    +        raise NotImplementedError("`get_columns_in_relation` is not implemented for this adapter!")
     
         @available.deprecated("get_columns_in_relation", lambda *a, **k: [])
         def get_columns_in_table(self, schema: str, identifier: str) -> List[BaseColumn]:
    @@ -555,7 +552,7 @@ def expand_column_types(self, goal: BaseRelation, current: BaseRelation) -> None
             :param self.Relation current: A relation that currently exists in the
                 database with columns of unspecified types.
             """
    -        raise NotImplementedException(
    +        raise NotImplementedError(
                 "`expand_target_column_types` is not implemented for this adapter!"
             )
     
    @@ -570,7 +567,7 @@ def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[
             :return: The relations in schema
             :rtype: List[self.Relation]
             """
    -        raise NotImplementedException(
    +        raise NotImplementedError(
                 "`list_relations_without_caching` is not implemented for this adapter!"
             )
     
    @@ -612,7 +609,7 @@ def get_missing_columns(
             to_relation.
             """
             if not isinstance(from_relation, self.Relation):
    -            raise InvalidMacroArgType(
    +            raise MacroArgTypeError(
                     method_name="get_missing_columns",
                     arg_name="from_relation",
                     got_value=from_relation,
    @@ -620,7 +617,7 @@ def get_missing_columns(
                 )
     
             if not isinstance(to_relation, self.Relation):
    -            raise InvalidMacroArgType(
    +            raise MacroArgTypeError(
                     method_name="get_missing_columns",
                     arg_name="to_relation",
                     got_value=to_relation,
    @@ -641,11 +638,11 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None:
             expected columns.
     
             :param Relation relation: The relation to check
    -        :raises CompilationException: If the columns are
+        :raises MacroArgTypeError: If the columns are
                 incorrect.
             """
             if not isinstance(relation, self.Relation):
    -            raise InvalidMacroArgType(
    +            raise MacroArgTypeError(
                     method_name="valid_snapshot_target",
                     arg_name="relation",
                     got_value=relation,
    @@ -666,16 +663,16 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None:
     
             if missing:
                 if extra:
    -                raise SnapshotTargetIncomplete(extra, missing)
    +                raise SnapshotTargetIncompleteError(extra, missing)
                 else:
    -                raise SnapshotTargetNotSnapshotTable(missing)
    +                raise SnapshotTargetNotSnapshotTableError(missing)
     
         @available.parse_none
         def expand_target_column_types(
             self, from_relation: BaseRelation, to_relation: BaseRelation
         ) -> None:
             if not isinstance(from_relation, self.Relation):
    -            raise InvalidMacroArgType(
    +            raise MacroArgTypeError(
                     method_name="expand_target_column_types",
                     arg_name="from_relation",
                     got_value=from_relation,
    @@ -683,7 +680,7 @@ def expand_target_column_types(
                 )
     
             if not isinstance(to_relation, self.Relation):
    -            raise InvalidMacroArgType(
    +            raise MacroArgTypeError(
                     method_name="expand_target_column_types",
                     arg_name="to_relation",
                     got_value=to_relation,
    @@ -765,7 +762,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[
                     "schema": schema,
                     "database": database,
                 }
    -            raise RelationReturnedMultipleResults(kwargs, matches)
    +            raise RelationReturnedMultipleResultsError(kwargs, matches)
     
             elif matches:
                 return matches[0]
    @@ -787,20 +784,20 @@ def already_exists(self, schema: str, name: str) -> bool:
         @available.parse_none
         def create_schema(self, relation: BaseRelation):
             """Create the given schema if it does not exist."""
    -        raise NotImplementedException("`create_schema` is not implemented for this adapter!")
    +        raise NotImplementedError("`create_schema` is not implemented for this adapter!")
     
         @abc.abstractmethod
         @available.parse_none
         def drop_schema(self, relation: BaseRelation):
             """Drop the given schema (and everything in it) if it exists."""
    -        raise NotImplementedException("`drop_schema` is not implemented for this adapter!")
    +        raise NotImplementedError("`drop_schema` is not implemented for this adapter!")
     
         @available
         @classmethod
         @abc.abstractmethod
         def quote(cls, identifier: str) -> str:
             """Quote the given identifier, as appropriate for the database."""
    -        raise NotImplementedException("`quote` is not implemented for this adapter!")
    +        raise NotImplementedError("`quote` is not implemented for this adapter!")
     
         @available
         def quote_as_configured(self, identifier: str, quote_key: str) -> str:
    @@ -829,7 +826,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str:
             elif quote_config is None:
                 pass
             else:
    -            raise InvalidQuoteConfigType(quote_config)
    +            raise QuoteConfigTypeError(quote_config)
     
             if quote_columns:
                 return self.quote(column)
    @@ -850,7 +847,7 @@ def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
             :param col_idx: The index into the agate table for the column.
             :return: The name of the type in the database
             """
    -        raise NotImplementedException("`convert_text_type` is not implemented for this adapter!")
    +        raise NotImplementedError("`convert_text_type` is not implemented for this adapter!")
     
         @classmethod
         @abc.abstractmethod
    @@ -862,7 +859,7 @@ def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
             :param col_idx: The index into the agate table for the column.
             :return: The name of the type in the database
             """
    -        raise NotImplementedException("`convert_number_type` is not implemented for this adapter!")
    +        raise NotImplementedError("`convert_number_type` is not implemented for this adapter!")
     
         @classmethod
         @abc.abstractmethod
    @@ -874,9 +871,7 @@ def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
             :param col_idx: The index into the agate table for the column.
             :return: The name of the type in the database
             """
    -        raise NotImplementedException(
    -            "`convert_boolean_type` is not implemented for this adapter!"
    -        )
    +        raise NotImplementedError("`convert_boolean_type` is not implemented for this adapter!")
     
         @classmethod
         @abc.abstractmethod
    @@ -888,9 +883,7 @@ def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
             :param col_idx: The index into the agate table for the column.
             :return: The name of the type in the database
             """
    -        raise NotImplementedException(
    -            "`convert_datetime_type` is not implemented for this adapter!"
    -        )
    +        raise NotImplementedError("`convert_datetime_type` is not implemented for this adapter!")
     
         @classmethod
         @abc.abstractmethod
    @@ -902,7 +895,7 @@ def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
             :param col_idx: The index into the agate table for the column.
             :return: The name of the type in the database
             """
    -        raise NotImplementedException("`convert_date_type` is not implemented for this adapter!")
    +        raise NotImplementedError("`convert_date_type` is not implemented for this adapter!")
     
         @classmethod
         @abc.abstractmethod
    @@ -914,7 +907,7 @@ def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
             :param col_idx: The index into the agate table for the column.
             :return: The name of the type in the database
             """
    -        raise NotImplementedException("`convert_time_type` is not implemented for this adapter!")
    +        raise NotImplementedError("`convert_time_type` is not implemented for this adapter!")
     
         @available
         @classmethod
    @@ -949,7 +942,7 @@ def execute_macro(
             context_override: Optional[Dict[str, Any]] = None,
             kwargs: Dict[str, Any] = None,
             text_only_columns: Optional[Iterable[str]] = None,
    -    ) -> agate.Table:
    +    ) -> AttrDict:
             """Look macro_name up in the manifest and execute its results.
     
             :param macro_name: The name of the macro to execute.
    @@ -981,7 +974,7 @@ def execute_macro(
                 else:
                     package_name = 'the "{}" package'.format(project)
     
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     'dbt could not find a macro with the name "{}" in {}'.format(
                         macro_name, package_name
                     )
    @@ -1034,7 +1027,7 @@ def _get_one_catalog(
                 manifest=manifest,
             )
     
    -        results = self._catalog_filter_table(table, manifest)
    +        results = self._catalog_filter_table(table, manifest)  # type: ignore[arg-type]
             return results
     
         def get_catalog(self, manifest: Manifest) -> Tuple[agate.Table, List[Exception]]:
    @@ -1066,7 +1059,7 @@ def calculate_freshness(
             loaded_at_field: str,
             filter: Optional[str],
             manifest: Optional[Manifest] = None,
    -    ) -> Dict[str, Any]:
    +    ) -> Tuple[AdapterResponse, Dict[str, Any]]:
             """Calculate the freshness of sources in dbt, and return it"""
             kwargs: Dict[str, Any] = {
                 "source": source,
    @@ -1075,11 +1068,12 @@ def calculate_freshness(
             }
     
             # run the macro
    -        table = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest)
    +        result = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest)
    +        adapter_response, table = result.response, result.table  # type: ignore[attr-defined]
             # now we have a 1-row table of the maximum `loaded_at_field` value and
             # the current time according to the db.
             if len(table) != 1 or len(table[0]) != 2:
    -            raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table)
    +            raise MacroResultError(FRESHNESS_MACRO_NAME, table)
             if table[0][0] is None:
                 # no records in the table, so really the max_loaded_at was
             # infinitely long ago. Just call it 0:00 January 1 year 1 UTC
    @@ -1089,11 +1083,12 @@ def calculate_freshness(
     
             snapshotted_at = _utc(table[0][1], source, loaded_at_field)
             age = (snapshotted_at - max_loaded_at).total_seconds()
    -        return {
    +        freshness = {
                 "max_loaded_at": max_loaded_at,
                 "snapshotted_at": snapshotted_at,
                 "age": age,
             }
    +        return adapter_response, freshness
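
With this change calculate_freshness returns the adapter response alongside the freshness data, so call sites now unpack a tuple. A minimal sketch of the updated usage; the relation and field names are illustrative:

    # New shape: (AdapterResponse, dict with max_loaded_at/snapshotted_at/age).
    adapter_response, freshness = adapter.calculate_freshness(
        source=source_relation,
        loaded_at_field="loaded_at",
        filter=None,
        manifest=manifest,
    )
    print(f"source is {freshness['age']:.0f}s stale")
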
     
         def pre_model_hook(self, config: Mapping[str, Any]) -> Any:
             """A hook for running some operation before the model materialization
    @@ -1156,7 +1151,7 @@ def string_add_sql(
             elif location == "prepend":
                 return f"'{value}' || {add_to}"
             else:
    -            raise RuntimeException(f'Got an unexpected location value of "{location}"')
    +            raise DbtRuntimeError(f'Got an unexpected location value of "{location}"')
     
         def get_rows_different_sql(
             self,
    @@ -1214,7 +1209,7 @@ def submit_python_job(self, parsed_model: dict, compiled_code: str) -> AdapterRe
             return self.generate_python_submission_response(submission_result)
     
         def generate_python_submission_response(self, submission_result: Any) -> AdapterResponse:
    -        raise NotImplementedException(
    +        raise NotImplementedError(
                 "Your adapter need to implement generate_python_submission_response"
             )
     
    @@ -1238,7 +1233,7 @@ def get_incremental_strategy_macro(self, model_context, strategy: str):
             valid_strategies.append("default")
             builtin_strategies = self.builtin_incremental_strategies()
             if strategy in builtin_strategies and strategy not in valid_strategies:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f"The incremental strategy '{strategy}' is not valid for this adapter"
                 )
     
    @@ -1246,7 +1241,7 @@ def get_incremental_strategy_macro(self, model_context, strategy: str):
             macro_name = f"get_incremental_{strategy}_sql"
             # The model_context should have MacroGenerator callable objects for all macros
             if macro_name not in model_context:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     'dbt could not find an incremental strategy macro with the name "{}" in {}'.format(
                         macro_name, self.config.project_name
                     )
    diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py
    index f841ac772c2..58481f75439 100644
    --- a/core/dbt/adapters/base/plugin.py
    +++ b/core/dbt/adapters/base/plugin.py
    @@ -1,7 +1,7 @@
     from typing import List, Optional, Type
     
     from dbt.adapters.base import Credentials
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.adapters.protocol import AdapterProtocol
     
     
    @@ -11,7 +11,7 @@ def project_name_from_path(include_path: str) -> str:
     
         partial = PartialProject.from_project_root(include_path)
         if partial.project_name is None:
    -        raise CompilationException(f"Invalid project at {include_path}: name not set!")
    +        raise CompilationError(f"Invalid project at {include_path}: name not set!")
         return partial.project_name
     
     
    diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py
    index dd88fdb2d41..bfacd2aee8c 100644
    --- a/core/dbt/adapters/base/query_headers.py
    +++ b/core/dbt/adapters/base/query_headers.py
    @@ -7,7 +7,7 @@
     from dbt.contracts.connection import AdapterRequiredConfig, QueryComment
     from dbt.contracts.graph.nodes import ResultNode
     from dbt.contracts.graph.manifest import Manifest
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     
     
     class NodeWrapper:
    @@ -48,7 +48,7 @@ def set(self, comment: Optional[str], append: bool):
             if isinstance(comment, str) and "*/" in comment:
                 # tell the user "no" so they don't hurt themselves by writing
                 # garbage
    -            raise RuntimeException(f'query comment contains illegal value "*/": {comment}')
    +            raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}')
             self.query_comment = comment
             self.append = append
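
The '*/' check above matters because the comment is presumably embedded in a SQL block comment; a stray terminator would close that comment early and splice arbitrary text into the query. A hedged illustration of the failure mode being prevented (the exact wrapping is an assumption):

    # If '*/' were allowed, a "comment" could escape its wrapper:
    comment = "harmless */ drop table users; --"
    sql = f"/* {comment} */ select 1"
    # -> "/* harmless */ drop table users; -- */ select 1"
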
     
    diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py
    index 5bc0c56b264..13f64c01742 100644
    --- a/core/dbt/adapters/base/relation.py
    +++ b/core/dbt/adapters/base/relation.py
    @@ -11,7 +11,11 @@
         Policy,
         Path,
     )
    -from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed
    +from dbt.exceptions import (
    +    ApproximateMatchError,
    +    DbtInternalError,
    +    MultipleDatabasesNotAllowedError,
    +)
     from dbt.node_types import NodeType
     from dbt.utils import filter_null_values, deep_merge, classproperty
     
    @@ -83,7 +87,7 @@ def matches(
     
             if not search:
                 # nothing was passed in
    -            raise dbt.exceptions.RuntimeException(
    +            raise dbt.exceptions.DbtRuntimeError(
                     "Tried to match relation, but no search path was passed!"
                 )
     
    @@ -100,7 +104,7 @@ def matches(
     
             if approximate_match and not exact_match:
                 target = self.create(database=database, schema=schema, identifier=identifier)
    -            raise ApproximateMatch(target, self)
    +            raise ApproximateMatchError(target, self)
     
             return exact_match
     
    @@ -249,14 +253,14 @@ def create_from(
         ) -> Self:
             if node.resource_type == NodeType.Source:
                 if not isinstance(node, SourceDefinition):
    -                raise InternalException(
    +                raise DbtInternalError(
                         "type mismatch, expected SourceDefinition but got {}".format(type(node))
                     )
                 return cls.create_from_source(node, **kwargs)
             else:
                 # Can't use ManifestNode here because of parameterized generics
                 if not isinstance(node, (ParsedNode)):
    -                raise InternalException(
    +                raise DbtInternalError(
                         f"type mismatch, expected ManifestNode but got {type(node)}"
                     )
                 return cls.create_from_node(config, node, **kwargs)
    @@ -354,7 +358,7 @@ class InformationSchema(BaseRelation):
     
         def __post_init__(self):
             if not isinstance(self.information_schema_view, (type(None), str)):
    -            raise dbt.exceptions.CompilationException(
    +            raise dbt.exceptions.CompilationError(
                     "Got an invalid name: {}".format(self.information_schema_view)
                 )
     
    @@ -438,7 +442,7 @@ def flatten(self, allow_multiple_databases: bool = False):
             if not allow_multiple_databases:
                 seen = {r.database.lower() for r in self if r.database}
                 if len(seen) > 1:
    -                raise MultipleDatabasesNotAllowed(seen)
    +                raise MultipleDatabasesNotAllowedError(seen)
     
             for information_schema_name, schema in self.search():
                 path = {"database": information_schema_name.database, "schema": schema}
    diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py
    index 90c4cab27fb..24a0e469df1 100644
    --- a/core/dbt/adapters/cache.py
    +++ b/core/dbt/adapters/cache.py
    @@ -9,28 +9,14 @@
         _ReferenceKey,
     )
     from dbt.exceptions import (
    -    DependentLinkNotCached,
    -    NewNameAlreadyInCache,
    -    NoneRelationFound,
    -    ReferencedLinkNotCached,
    -    TruncatedModelNameCausedCollision,
    +    DependentLinkNotCachedError,
    +    NewNameAlreadyInCacheError,
    +    NoneRelationFoundError,
    +    ReferencedLinkNotCachedError,
    +    TruncatedModelNameCausedCollisionError,
     )
     from dbt.events.functions import fire_event, fire_event_if
    -from dbt.events.types import (
    -    AddLink,
    -    AddRelation,
    -    DropCascade,
    -    DropMissingRelation,
    -    DropRelation,
    -    DumpAfterAddGraph,
    -    DumpAfterRenameSchema,
    -    DumpBeforeAddGraph,
    -    DumpBeforeRenameSchema,
    -    RenameSchema,
    -    TemporaryRelation,
    -    UncachedRelation,
    -    UpdateReference,
    -)
    +from dbt.events.types import CacheAction, CacheDumpGraph
     import dbt.flags as flags
     from dbt.utils import lowercase
     
    @@ -155,7 +141,7 @@ def rename_key(self, old_key, new_key):
         :raises NewNameAlreadyInCacheError: If the new key already exists.
             """
             if new_key in self.referenced_by:
    -            raise NewNameAlreadyInCache(old_key, new_key)
    +            raise NewNameAlreadyInCacheError(old_key, new_key)
     
             if old_key not in self.referenced_by:
                 return
    @@ -271,17 +257,17 @@ def _add_link(self, referenced_key, dependent_key):
             if referenced is None:
                 return
             if referenced is None:
    -            raise ReferencedLinkNotCached(referenced_key)
    +            raise ReferencedLinkNotCachedError(referenced_key)
     
             dependent = self.relations.get(dependent_key)
             if dependent is None:
    -            raise DependentLinkNotCached(dependent_key)
    +            raise DependentLinkNotCachedError(dependent_key)
     
             assert dependent is not None  # we just raised!
     
             referenced.add_reference(dependent)
     
    -    # TODO: Is this dead code?  I can't seem to find it grepping the codebase.
    +    # This is called in plugins/postgres/dbt/adapters/postgres/impl.py
         def add_link(self, referenced, dependent):
             """Add a link between two relations to the database. If either relation
             does not exist, it will be added as an "external" relation.
    @@ -303,9 +289,9 @@ def add_link(self, referenced, dependent):
                 # referring to a table outside our control. There's no need to make
                 # a link - we will never drop the referenced relation during a run.
                 fire_event(
    -                UncachedRelation(
    -                    dep_key=_make_msg_from_ref_key(dep_key),
+                CacheAction(
+                    action="uncached_relation",
                         ref_key=_make_msg_from_ref_key(ref_key),
    +                    ref_key_2=_make_msg_from_ref_key(dep_key),
                     )
                 )
                 return
    @@ -318,8 +304,10 @@ def add_link(self, referenced, dependent):
                 dependent = dependent.replace(type=referenced.External)
                 self.add(dependent)
             fire_event(
    -            AddLink(
    -                dep_key=_make_msg_from_ref_key(dep_key), ref_key=_make_msg_from_ref_key(ref_key)
    +            CacheAction(
    +                action="add_link",
    +                ref_key=_make_msg_from_ref_key(dep_key),
    +                ref_key_2=_make_msg_from_ref_key(ref_key),
                 )
             )
             with self.lock:
    @@ -332,12 +320,18 @@ def add(self, relation):
             :param BaseRelation relation: The underlying relation.
             """
             cached = _CachedRelation(relation)
    -        fire_event(AddRelation(relation=_make_ref_key_msg(cached)))
    -        fire_event_if(flags.LOG_CACHE_EVENTS, lambda: DumpBeforeAddGraph(dump=self.dump_graph()))
    +        fire_event_if(
    +            flags.LOG_CACHE_EVENTS,
    +            lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()),
    +        )
    +        fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_msg(cached)))
     
             with self.lock:
                 self._setdefault(cached)
    -        fire_event_if(flags.LOG_CACHE_EVENTS, lambda: DumpAfterAddGraph(dump=self.dump_graph()))
    +        fire_event_if(
    +            flags.LOG_CACHE_EVENTS,
    +            lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()),
    +        )
     
         def _remove_refs(self, keys):
             """Removes all references to all entries in keys. This does not
    @@ -365,16 +359,19 @@ def drop(self, relation):
             """
             dropped_key = _make_ref_key(relation)
             dropped_key_msg = _make_ref_key_msg(relation)
    -        fire_event(DropRelation(dropped=dropped_key_msg))
    +        fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg))
             with self.lock:
                 if dropped_key not in self.relations:
    -                fire_event(DropMissingRelation(relation=dropped_key_msg))
    +                fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg))
                     return
                 consequences = self.relations[dropped_key].collect_consequences()
                 # convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs
                 consequence_msgs = [_make_msg_from_ref_key(key) for key in consequences]
    -
    -            fire_event(DropCascade(dropped=dropped_key_msg, consequences=consequence_msgs))
    +            fire_event(
    +                CacheAction(
    +                    action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs
    +                )
    +            )
                 self._remove_refs(consequences)
     
         def _rename_relation(self, old_key, new_relation):
    @@ -397,12 +394,14 @@ def _rename_relation(self, old_key, new_relation):
             for cached in self.relations.values():
                 if cached.is_referenced_by(old_key):
                     fire_event(
    -                    UpdateReference(
    -                        old_key=_make_ref_key_msg(old_key),
    -                        new_key=_make_ref_key_msg(new_key),
    -                        cached_key=_make_ref_key_msg(cached.key()),
    +                    CacheAction(
    +                        action="update_reference",
    +                        ref_key=_make_ref_key_msg(old_key),
    +                        ref_key_2=_make_ref_key_msg(new_key),
    +                        ref_key_3=_make_ref_key_msg(cached.key()),
                         )
                     )
    +
                     cached.rename_key(old_key, new_key)
     
             self.relations[new_key] = relation
    @@ -427,10 +426,12 @@ def _check_rename_constraints(self, old_key, new_key):
             if new_key in self.relations:
                 # Tell user when collision caused by model names truncated during
                 # materialization.
    -            raise TruncatedModelNameCausedCollision(new_key, self.relations)
    +            raise TruncatedModelNameCausedCollisionError(new_key, self.relations)
     
             if old_key not in self.relations:
    -            fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key)))
    +            fire_event(
    +                CacheAction(action="temporary_relation", ref_key=_make_msg_from_ref_key(old_key))
    +            )
                 return False
             return True
     
    @@ -449,13 +450,16 @@ def rename(self, old, new):
             old_key = _make_ref_key(old)
             new_key = _make_ref_key(new)
             fire_event(
    -            RenameSchema(
    -                old_key=_make_msg_from_ref_key(old_key), new_key=_make_msg_from_ref_key(new)
    +            CacheAction(
    +                action="rename_relation",
    +                ref_key=_make_msg_from_ref_key(old_key),
    +                ref_key_2=_make_msg_from_ref_key(new),
                 )
             )
     
             fire_event_if(
    -            flags.LOG_CACHE_EVENTS, lambda: DumpBeforeRenameSchema(dump=self.dump_graph())
    +            flags.LOG_CACHE_EVENTS,
    +            lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
             )
     
             with self.lock:
    @@ -465,7 +469,8 @@ def rename(self, old, new):
                     self._setdefault(_CachedRelation(new))
     
             fire_event_if(
    -            flags.LOG_CACHE_EVENTS, lambda: DumpAfterRenameSchema(dump=self.dump_graph())
    +            flags.LOG_CACHE_EVENTS,
    +            lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()),
             )
     
         def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
    @@ -485,7 +490,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[
                 ]
     
             if None in results:
    -            raise NoneRelationFound()
    +            raise NoneRelationFoundError()
             return results
     
         def clear(self):
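
Note: the hunks above collapse many one-off cache event classes (AddLink, DropRelation, UpdateReference, ...) into a single CacheAction discriminated by an `action` string, plus CacheDumpGraph for the before/after dumps. A minimal sketch of firing the consolidated event; ReferenceKeyMsg living in dbt.events.proto_types with database/schema/identifier fields is an assumption inferred from the _make_msg_from_ref_key helpers above:

    from dbt.events.functions import fire_event
    from dbt.events.proto_types import ReferenceKeyMsg  # assumed location of the proto message
    from dbt.events.types import CacheAction

    # One event type now covers add/drop/rename/etc.; only `action` varies,
    # which keeps the proto schema stable as new cache actions are added.
    key = ReferenceKeyMsg(database="analytics", schema="staging", identifier="stg_orders")
    fire_event(CacheAction(action="drop_relation", ref_key=key))
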
    diff --git a/core/dbt/adapters/factory.py b/core/dbt/adapters/factory.py
    index 16a0a3ffcd1..38c6bcb7894 100644
    --- a/core/dbt/adapters/factory.py
    +++ b/core/dbt/adapters/factory.py
    @@ -10,7 +10,7 @@
     from dbt.contracts.connection import AdapterRequiredConfig, Credentials
     from dbt.events.functions import fire_event
     from dbt.events.types import AdapterImportError, PluginLoadError
    -from dbt.exceptions import InternalException, RuntimeException
    +from dbt.exceptions import DbtInternalError, DbtRuntimeError
     from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
     from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
     
    @@ -34,7 +34,7 @@ def get_plugin_by_name(self, name: str) -> AdapterPlugin:
                 names = ", ".join(self.plugins.keys())
     
             message = f"Invalid adapter type {name}! Must be one of {names}"
    -        raise RuntimeException(message)
    +        raise DbtRuntimeError(message)
     
         def get_adapter_class_by_name(self, name: str) -> Type[Adapter]:
             plugin = self.get_plugin_by_name(name)
    @@ -60,7 +60,7 @@ def load_plugin(self, name: str) -> Type[Credentials]:
                 # the user about it via a runtime error
                 if exc.name == "dbt.adapters." + name:
                     fire_event(AdapterImportError(exc=str(exc)))
    -                raise RuntimeException(f"Could not find adapter type {name}!")
    +                raise DbtRuntimeError(f"Could not find adapter type {name}!")
                 # otherwise, the error had to have come from some underlying
                 # library. Log the stack trace.
     
    @@ -70,7 +70,7 @@ def load_plugin(self, name: str) -> Type[Credentials]:
             plugin_type = plugin.adapter.type()
     
             if plugin_type != name:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f"Expected to find adapter with type named {name}, got "
                     f"adapter with type {plugin_type}"
                 )
    @@ -132,7 +132,7 @@ def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]:
                 try:
                     plugin = self.plugins[plugin_name]
                 except KeyError:
    -                raise InternalException(f"No plugin found for {plugin_name}") from None
    +                raise DbtInternalError(f"No plugin found for {plugin_name}") from None
                 plugins.append(plugin)
                 seen.add(plugin_name)
                 for dep in plugin.dependencies:
    @@ -151,7 +151,7 @@ def get_include_paths(self, name: Optional[str]) -> List[Path]:
                 try:
                     path = self.packages[package_name]
                 except KeyError:
    -                raise InternalException(f"No internal package listing found for {package_name}")
    +                raise DbtInternalError(f"No internal package listing found for {package_name}")
                 paths.append(path)
             return paths
     
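
For reference, a hedged sketch of how the renamed errors surface to callers of the adapter factory; AdapterContainer taking no constructor arguments is an assumption here:

    from dbt.adapters.factory import AdapterContainer
    from dbt.exceptions import DbtRuntimeError

    container = AdapterContainer()  # assumed no-arg constructor
    try:
        container.get_adapter_class_by_name("nonexistent")
    except DbtRuntimeError as err:
        print(err)  # "Invalid adapter type nonexistent! Must be one of ..."
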
    diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py
    index bc1a562ad86..e13cf12e319 100644
    --- a/core/dbt/adapters/sql/connections.py
    +++ b/core/dbt/adapters/sql/connections.py
    @@ -27,9 +27,7 @@ class SQLConnectionManager(BaseConnectionManager):
         @abc.abstractmethod
         def cancel(self, connection: Connection):
             """Cancel the given connection."""
    -        raise dbt.exceptions.NotImplementedException(
    -            "`cancel` is not implemented for this adapter!"
    -        )
    +        raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!")
     
         def cancel_open(self) -> List[str]:
             names = []
    @@ -95,7 +93,7 @@ def add_query(
         @abc.abstractmethod
         def get_response(cls, cursor: Any) -> AdapterResponse:
             """Get the status of the cursor."""
    -        raise dbt.exceptions.NotImplementedException(
    +        raise dbt.exceptions.NotImplementedError(
                 "`get_response` is not implemented for this adapter!"
             )
     
    @@ -151,7 +149,7 @@ def add_commit_query(self):
         def begin(self):
             connection = self.get_thread_connection()
             if connection.transaction_open is True:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     'Tried to begin a new transaction on connection "{}", but '
                     "it already had one open!".format(connection.name)
                 )
    @@ -164,7 +162,7 @@ def begin(self):
         def commit(self):
             connection = self.get_thread_connection()
             if connection.transaction_open is False:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     'Tried to commit transaction on connection "{}", but '
                     "it does not have one open!".format(connection.name)
                 )
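
The begin()/commit() changes above keep enforcing a simple invariant: at most one open transaction per connection, and commits only against an open one. A standalone illustration of the pattern (not dbt's class):

    class TxnGuard:
        def __init__(self):
            self.transaction_open = False

        def begin(self):
            if self.transaction_open:
                raise RuntimeError("Tried to begin a new transaction, but one is already open!")
            self.transaction_open = True

        def commit(self):
            if not self.transaction_open:
                raise RuntimeError("Tried to commit a transaction, but none is open!")
            self.transaction_open = False
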
    diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py
    index 4606b046f54..fc787f0c834 100644
    --- a/core/dbt/adapters/sql/impl.py
    +++ b/core/dbt/adapters/sql/impl.py
    @@ -2,7 +2,7 @@
     from typing import Any, Optional, Tuple, Type, List
     
     from dbt.contracts.connection import Connection
    -from dbt.exceptions import RelationTypeNull
    +from dbt.exceptions import RelationTypeNullError
     from dbt.adapters.base import BaseAdapter, available
     from dbt.adapters.cache import _make_ref_key_msg
     from dbt.adapters.sql import SQLConnectionManager
    @@ -131,7 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None:
     
         def drop_relation(self, relation):
             if relation.type is None:
    -            raise RelationTypeNull(relation)
    +            raise RelationTypeNullError(relation)
     
             self.cache_dropped(relation)
             self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
    diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
    index 272334fe303..44a911c3784 100644
    --- a/core/dbt/cli/main.py
    +++ b/core/dbt/cli/main.py
    @@ -1,4 +1,3 @@
    -import inspect  # This is temporary for RAT-ing
     from copy import copy
     from pprint import pformat as pf  # This is temporary for RAT-ing
     from typing import List, Tuple, Optional
    @@ -86,6 +85,7 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
     @p.version
     @p.version_check
     @p.warn_error
    +@p.warn_error_options
     @p.write_json
     def cli(ctx, **kwargs):
         """An ELT tool for managing your SQL transformations and data models.
    diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py
    index e0294c2a096..6149dd7d5ee 100644
    --- a/core/dbt/cli/option_types.py
    +++ b/core/dbt/cli/option_types.py
    @@ -1,7 +1,9 @@
     from click import ParamType, Choice
     
     from dbt.config.utils import parse_cli_vars
    -from dbt.exceptions import ValidationException
    +from dbt.exceptions import ValidationError
    +
    +from dbt.helper_types import WarnErrorOptions
     
     
     class YAML(ParamType):
    @@ -15,10 +17,23 @@ def convert(self, value, param, ctx):
                 self.fail(f"Cannot load YAML from type {type(value)}", param, ctx)
             try:
                 return parse_cli_vars(value)
    -        except ValidationException:
    +        except ValidationError:
                 self.fail(f"String '{value}' is not valid YAML", param, ctx)
     
     
    +class WarnErrorOptionsType(YAML):
    +    """The Click WarnErrorOptions type. Converts YAML strings into objects."""
    +
    +    name = "WarnErrorOptionsType"
    +
    +    def convert(self, value, param, ctx):
    +        include_exclude = super().convert(value, param, ctx)
    +
    +        return WarnErrorOptions(
    +            include=include_exclude.get("include", []), exclude=include_exclude.get("exclude", [])
    +        )
    +
    +
     class Truthy(ParamType):
         """The Click Truthy type.  Converts strings into a "truthy" type"""
     
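
A quick sketch of the new conversion, assuming convert() tolerates param=None/ctx=None on the success path and that the resulting WarnErrorOptions exposes include/exclude attributes:

    from dbt.cli.option_types import WarnErrorOptionsType

    opts = WarnErrorOptionsType().convert(
        '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}', None, None
    )
    print(opts.include, opts.exclude)  # 'all' ['NoNodesForSelectionCriteria']
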
    diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
    index 7356267eb9f..1dbc5bffd8d 100644
    --- a/core/dbt/cli/params.py
    +++ b/core/dbt/cli/params.py
    @@ -2,7 +2,7 @@
     
     import click
     from dbt.cli.options import MultiOption
    -from dbt.cli.option_types import YAML, ChoiceTuple
    +from dbt.cli.option_types import YAML, ChoiceTuple, WarnErrorOptionsType
     from dbt.cli.resolvers import default_project_dir, default_profiles_dir
     
     
    @@ -104,7 +104,7 @@
         "--indirect-selection",
         envvar="DBT_INDIRECT_SELECTION",
         help="Select all tests that are adjacent to selected resources, even if they those resources have been explicitly selected.",
    -    type=click.Choice(["eager", "cautious"], case_sensitive=False),
    +    type=click.Choice(["eager", "cautious", "buildable"], case_sensitive=False),
         default="eager",
     )
     
    @@ -299,7 +299,7 @@
     )
     
     skip_profile_setup = click.option(
    -    "--skip-profile-setup", "-s", envvar=None, help="Skip interative profile setup.", is_flag=True
    +    "--skip-profile-setup", "-s", envvar=None, help="Skip interactive profile setup.", is_flag=True
     )
     
     # TODO:  The env var and name (reflected in flags) are corrections!
    @@ -388,9 +388,20 @@
     )
     
     warn_error = click.option(
    -    "--warn-error/--no-warn-error",
    +    "--warn-error",
         envvar="DBT_WARN_ERROR",
    -    help="If dbt would normally warn, instead raise an exception. Examples include --models that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.",
    +    help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.",
    +    default=None,
    +    flag_value=True,
    +)
    +
    +warn_error_options = click.option(
    +    "--warn-error-options",
    +    envvar="DBT_WARN_ERROR_OPTIONS",
    +    default=None,
    +    help="""If dbt would normally warn, instead raise an exception based on include/exclude configuration. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations,
+    and missing sources/refs in tests. This argument should be a YAML string, with keys 'include' or 'exclude'. e.g. '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'""",
    +    type=WarnErrorOptionsType(),
     )
     
     write_json = click.option(
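
Note that --warn-error changes from a paired --warn-error/--no-warn-error boolean into a tri-state flag: default=None lets downstream code distinguish "not passed" from "explicitly set", which matters once --warn-error-options can also drive the behavior. A standalone Click sketch of the same pattern (not dbt's CLI):

    import click

    @click.command()
    @click.option("--warn-error", envvar="DBT_WARN_ERROR", default=None, flag_value=True)
    def demo(warn_error):
        click.echo(f"warn_error={warn_error!r}")  # None when omitted, True when passed

    if __name__ == "__main__":
        demo()
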
    diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py
    index fa74a317649..1ada0a6234d 100644
    --- a/core/dbt/clients/_jinja_blocks.py
    +++ b/core/dbt/clients/_jinja_blocks.py
    @@ -2,13 +2,13 @@
     from collections import namedtuple
     
     from dbt.exceptions import (
    -    BlockDefinitionNotAtTop,
    -    InternalException,
    -    MissingCloseTag,
    -    MissingControlFlowStartTag,
    -    NestedTags,
    -    UnexpectedControlFlowEndTag,
    -    UnexpectedMacroEOF,
    +    BlockDefinitionNotAtTopError,
    +    DbtInternalError,
    +    MissingCloseTagError,
    +    MissingControlFlowStartTagError,
    +    NestedTagsError,
    +    UnexpectedControlFlowEndTagError,
    +    UnexpectedMacroEOFError,
     )
     
     
    @@ -147,7 +147,7 @@ def _first_match(self, *patterns, **kwargs):
         def _expect_match(self, expected_name, *patterns, **kwargs):
             match = self._first_match(*patterns, **kwargs)
             if match is None:
    -            raise UnexpectedMacroEOF(expected_name, self.data[self.pos :])
    +            raise UnexpectedMacroEOFError(expected_name, self.data[self.pos :])
             return match
     
         def handle_expr(self, match):
    @@ -261,7 +261,7 @@ def find_tags(self):
                 elif block_type_name is not None:
                     yield self.handle_tag(match)
                 else:
    -                raise InternalException(
    +                raise DbtInternalError(
                         "Invalid regex match in next_block, expected block start, "
                         "expr start, or comment start"
                     )
    @@ -317,16 +317,16 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
                         found = self.stack.pop()
                     else:
                         expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
    -                    raise UnexpectedControlFlowEndTag(tag, expected, self.tag_parser)
    +                    raise UnexpectedControlFlowEndTagError(tag, expected, self.tag_parser)
                     expected = _CONTROL_FLOW_TAGS[found]
                     if expected != tag.block_type_name:
    -                    raise MissingControlFlowStartTag(tag, expected, self.tag_parser)
    +                    raise MissingControlFlowStartTagError(tag, expected, self.tag_parser)
     
                 if tag.block_type_name in allowed_blocks:
                     if self.stack:
    -                    raise BlockDefinitionNotAtTop(self.tag_parser, tag.start)
    +                    raise BlockDefinitionNotAtTopError(self.tag_parser, tag.start)
                     if self.current is not None:
    -                    raise NestedTags(outer=self.current, inner=tag)
    +                    raise NestedTagsError(outer=self.current, inner=tag)
                     if collect_raw_data:
                         raw_data = self.data[self.last_position : tag.start]
                         self.last_position = tag.start
    @@ -347,7 +347,7 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
     
             if self.current:
                 linecount = self.data[: self.current.end].count("\n") + 1
    -            raise MissingCloseTag(self.current.block_type_name, linecount)
    +            raise MissingCloseTagError(self.current.block_type_name, linecount)
     
             if collect_raw_data:
                 raw_data = self.data[self.last_position :]
    diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py
    index 11492a9faef..1d69a2bd17f 100644
    --- a/core/dbt/clients/agate_helper.py
    +++ b/core/dbt/clients/agate_helper.py
    @@ -7,7 +7,7 @@
     import dbt.utils
     from typing import Iterable, List, Dict, Union, Optional, Any
     
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     
     
     BOM = BOM_UTF8.decode("utf-8")  # '\ufeff'
    @@ -168,7 +168,7 @@ def __setitem__(self, key, value):
                 return
             elif not isinstance(value, type(existing_type)):
                 # actual type mismatch!
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f"Tables contain columns with the same names ({key}), "
                     f"but different types ({value} vs {existing_type})"
                 )
    diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py
    index 4ddbb1969ee..d6cb3f3870c 100644
    --- a/core/dbt/clients/git.py
    +++ b/core/dbt/clients/git.py
    @@ -16,8 +16,8 @@
         CommandResultError,
         GitCheckoutError,
         GitCloningError,
    -    GitCloningProblem,
    -    RuntimeException,
    +    UnknownGitCloningProblemError,
    +    DbtRuntimeError,
     )
     from packaging import version
     
    @@ -134,7 +134,7 @@ def clone_and_checkout(
             err = exc.stderr
             exists = re.match("fatal: destination path '(.+)' already exists", err)
             if not exists:
    -            raise GitCloningProblem(repo)
    +            raise UnknownGitCloningProblemError(repo)
     
         directory = None
         start_sha = None
    @@ -144,7 +144,7 @@ def clone_and_checkout(
         else:
             matches = re.match("Cloning into '(.+)'", err.decode("utf-8"))
             if matches is None:
    -            raise RuntimeException(f'Error cloning {repo} - never saw "Cloning into ..." from git')
    +            raise DbtRuntimeError(f'Error cloning {repo} - never saw "Cloning into ..." from git')
             directory = matches.group(1)
             fire_event(GitProgressPullingNewDependency(dir=directory))
         full_path = os.path.join(cwd, directory)
    diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py
    index c1b8865e33e..e9dcb45017b 100644
    --- a/core/dbt/clients/jinja.py
    +++ b/core/dbt/clients/jinja.py
    @@ -28,17 +28,17 @@
     from dbt.contracts.graph.nodes import GenericTestNode
     
     from dbt.exceptions import (
    -    CaughtMacroException,
    -    CaughtMacroExceptionWithNode,
    -    CompilationException,
    -    InternalException,
    -    InvalidMaterializationArg,
    -    JinjaRenderingException,
    +    CaughtMacroError,
    +    CaughtMacroErrorWithNodeError,
    +    CompilationError,
    +    DbtInternalError,
    +    MaterializationArgError,
    +    JinjaRenderingError,
         MacroReturn,
    -    MaterializtionMacroNotUsed,
    -    NoSupportedLanguagesFound,
    -    UndefinedCompilation,
    -    UndefinedMacroException,
    +    MaterializtionMacroNotUsedError,
    +    NoSupportedLanguagesFoundError,
    +    UndefinedCompilationError,
    +    UndefinedMacroError,
     )
     from dbt import flags
     from dbt.node_types import ModelLanguage
    @@ -161,9 +161,9 @@ def quoted_native_concat(nodes):
         except (ValueError, SyntaxError, MemoryError):
             result = raw
         if isinstance(raw, BoolMarker) and not isinstance(result, bool):
    -        raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'bool'")
    +        raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'bool'")
         if isinstance(raw, NumberMarker) and not _is_number(result):
    -        raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'number'")
    +        raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'number'")
     
         return result
     
    @@ -241,12 +241,12 @@ def exception_handler(self) -> Iterator[None]:
             try:
                 yield
             except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:
    -            raise CaughtMacroException(e)
    +            raise CaughtMacroError(e)
     
         def call_macro(self, *args, **kwargs):
             # called from __call__ methods
             if self.context is None:
    -            raise InternalException("Context is still None in call_macro!")
    +            raise DbtInternalError("Context is still None in call_macro!")
             assert self.context is not None
     
             macro = self.get_macro()
    @@ -273,7 +273,7 @@ def push(self, name):
         def pop(self, name):
             got = self.call_stack.pop()
             if got != name:
    -            raise InternalException(f"popped {got}, expected {name}")
    +            raise DbtInternalError(f"popped {got}, expected {name}")
     
     
     class MacroGenerator(BaseMacroGenerator):
    @@ -300,8 +300,8 @@ def exception_handler(self) -> Iterator[None]:
             try:
                 yield
             except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:
    -            raise CaughtMacroExceptionWithNode(exc=e, node=self.macro)
    -        except CompilationException as e:
    +            raise CaughtMacroErrorWithNodeError(exc=e, node=self.macro)
    +        except CompilationError as e:
                 e.stack.append(self.macro)
                 raise e
     
    @@ -380,7 +380,7 @@ def parse(self, parser):
                     node.defaults.append(languages)
     
                 else:
    -                raise InvalidMaterializationArg(materialization_name, target.name)
    +                raise MaterializationArgError(materialization_name, target.name)
     
             if SUPPORTED_LANG_ARG not in node.args:
                 node.args.append(SUPPORTED_LANG_ARG)
    @@ -455,7 +455,7 @@ def __call__(self, *args, **kwargs):
                 return self
     
             def __reduce__(self):
    -            raise UndefinedCompilation(name=self.name, node=node)
    +            raise UndefinedCompilationError(name=self.name, node=node)
     
         return Undefined
     
    @@ -513,10 +513,10 @@ def catch_jinja(node=None) -> Iterator[None]:
             yield
         except jinja2.exceptions.TemplateSyntaxError as e:
             e.translated = False
    -        raise CompilationException(str(e), node) from e
    +        raise CompilationError(str(e), node) from e
         except jinja2.exceptions.UndefinedError as e:
    -        raise UndefinedMacroException(str(e), node) from e
    -    except CompilationException as exc:
    +        raise UndefinedMacroError(str(e), node) from e
    +    except CompilationError as exc:
             exc.add_node(node)
             raise
     
    @@ -655,13 +655,13 @@ def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any:
     
     def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]:
         if "materialization" not in node.name:
    -        raise MaterializtionMacroNotUsed(node=node)
    +        raise MaterializtionMacroNotUsedError(node=node)
     
         no_kwargs = not node.defaults
         no_langs_found = SUPPORTED_LANG_ARG not in node.args
     
         if no_kwargs or no_langs_found:
    -        raise NoSupportedLanguagesFound(node=node)
    +        raise NoSupportedLanguagesFoundError(node=node)
     
         lang_idx = node.args.index(SUPPORTED_LANG_ARG)
         # indexing defaults from the end
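
catch_jinja (above) is the boundary that turns raw Jinja failures into the renamed dbt errors. A minimal sketch via dbt's get_rendered helper, which compiles and renders inside that context manager:

    from dbt.clients.jinja import get_rendered
    from dbt.exceptions import CompilationError

    try:
        get_rendered("{% if %}", {})  # malformed block -> jinja2 TemplateSyntaxError
    except CompilationError as err:
        print(err)  # surfaced as a dbt CompilationError
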
    diff --git a/core/dbt/clients/jinja_static.py b/core/dbt/clients/jinja_static.py
    index d71211cea6e..47790166ae5 100644
    --- a/core/dbt/clients/jinja_static.py
    +++ b/core/dbt/clients/jinja_static.py
    @@ -1,6 +1,6 @@
     import jinja2
     from dbt.clients.jinja import get_environment
    -from dbt.exceptions import MacroNamespaceNotString, MacroNameNotString
    +from dbt.exceptions import MacroNamespaceNotStringError, MacroNameNotStringError
     
     
     def statically_extract_macro_calls(string, ctx, db_wrapper=None):
    @@ -117,14 +117,14 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper):
                         func_name = kwarg.value.value
                         possible_macro_calls.append(func_name)
                     else:
    -                    raise MacroNameNotString(kwarg_value=kwarg.value.value)
    +                    raise MacroNameNotStringError(kwarg_value=kwarg.value.value)
                 elif kwarg.key == "macro_namespace":
                     # This will remain to enable static resolution
                     kwarg_type = type(kwarg.value).__name__
                     if kwarg_type == "Const":
                         macro_namespace = kwarg.value.value
                     else:
    -                    raise MacroNamespaceNotString(kwarg_type)
    +                    raise MacroNamespaceNotStringError(kwarg_type)
     
         # positional arguments
         if packages_arg:
    diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py
    index e5a02b68475..dd802afaeca 100644
    --- a/core/dbt/clients/system.py
    +++ b/core/dbt/clients/system.py
    @@ -20,11 +20,11 @@
         SystemCouldNotWrite,
         SystemErrorRetrievingModTime,
         SystemExecutingCmd,
    +    SystemStdOut,
    +    SystemStdErr,
         SystemReportReturnCode,
    -    SystemStdErrMsg,
    -    SystemStdOutMsg,
     )
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.utils import _connection_exception_retry as connection_exception_retry
     from pathspec import PathSpec  # type: ignore
     
    @@ -115,7 +115,7 @@ def make_directory(path=None) -> None:
         exist. This function handles the case where two threads try to create
         a directory at once.
         """
    -    raise InternalException(f"Can not create directory from {type(path)} ")
    +    raise DbtInternalError(f"Can not create directory from {type(path)} ")
     
     
     @make_directory.register
    @@ -425,7 +425,7 @@ def _interpret_oserror(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
             _handle_posix_error(exc, cwd, cmd)
     
         # this should not be reachable, raise _something_ at least!
    -    raise dbt.exceptions.InternalException(
    +    raise dbt.exceptions.DbtInternalError(
             "Unhandled exception in _interpret_oserror: {}".format(exc)
         )
     
    @@ -454,8 +454,8 @@ def run_cmd(cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None) -> T
         except OSError as exc:
             _interpret_oserror(exc, cwd, cmd)
     
    -    fire_event(SystemStdOutMsg(bmsg=out))
    -    fire_event(SystemStdErrMsg(bmsg=err))
    +    fire_event(SystemStdOut(bmsg=out))
    +    fire_event(SystemStdErr(bmsg=err))
     
         if proc.returncode != 0:
             fire_event(SystemReportReturnCode(returncode=proc.returncode))
    diff --git a/core/dbt/clients/yaml_helper.py b/core/dbt/clients/yaml_helper.py
    index bc0ada41ebb..d5a29b0309f 100644
    --- a/core/dbt/clients/yaml_helper.py
    +++ b/core/dbt/clients/yaml_helper.py
    @@ -60,4 +60,4 @@ def load_yaml_text(contents, path=None):
             else:
                 error = str(e)
     
    -        raise dbt.exceptions.ValidationException(error)
    +        raise dbt.exceptions.DbtValidationError(error)
    diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py
    index 4ae78fd3485..a89f36d9f31 100644
    --- a/core/dbt/compilation.py
    +++ b/core/dbt/compilation.py
    @@ -1,11 +1,12 @@
    -import os
    -from collections import defaultdict
    -from typing import List, Dict, Any, Tuple, Optional
    -
    +import argparse
     import networkx as nx  # type: ignore
    +import os
     import pickle
     import sqlparse
     
    +from collections import defaultdict
    +from typing import List, Dict, Any, Tuple, Optional
    +
     from dbt import flags
     from dbt.adapters.factory import get_adapter
     from dbt.clients import jinja
    @@ -21,9 +22,9 @@
         SeedNode,
     )
     from dbt.exceptions import (
    -    GraphDependencyNotFound,
    -    InternalException,
    -    RuntimeException,
    +    GraphDependencyNotFoundError,
    +    DbtInternalError,
    +    DbtRuntimeError,
     )
     from dbt.graph import Graph
     from dbt.events.functions import fire_event
    @@ -32,6 +33,7 @@
     from dbt.node_types import NodeType, ModelLanguage
     from dbt.events.format import pluralize
     import dbt.tracking
    +import dbt.task.list as list_task
     
     graph_file_name = "graph.gpickle"
     
    @@ -257,7 +259,7 @@ def _recursively_prepend_ctes(
             inserting CTEs into the SQL.
             """
             if model.compiled_code is None:
    -            raise RuntimeException("Cannot inject ctes into an unparsed node", model)
    +            raise DbtRuntimeError("Cannot inject ctes into an unparsed node", model)
             if model.extra_ctes_injected:
                 return (model, model.extra_ctes)
     
    @@ -278,7 +280,7 @@ def _recursively_prepend_ctes(
             # ephemeral model.
             for cte in model.extra_ctes:
                 if cte.id not in manifest.nodes:
    -                raise InternalException(
    +                raise DbtInternalError(
                         f"During compilation, found a cte reference that "
                         f"could not be resolved: {cte.id}"
                     )
    @@ -286,7 +288,7 @@ def _recursively_prepend_ctes(
                 assert not isinstance(cte_model, SeedNode)
     
                 if not cte_model.is_ephemeral_model:
    -                raise InternalException(f"{cte.id} is not ephemeral")
    +                raise DbtInternalError(f"{cte.id} is not ephemeral")
     
                 # This model has already been compiled, so it's been
                 # through here before
    @@ -351,13 +353,6 @@ def _compile_node(
             )
     
             if node.language == ModelLanguage.python:
    -            # TODO could we also 'minify' this code at all? just aesthetic, not functional
    -
    -            # quoating seems like something very specific to sql so far
    -            # for all python implementations we are seeing there's no quating.
    -            # TODO try to find better way to do this, given that
    -            original_quoting = self.config.quoting
    -            self.config.quoting = {key: False for key in original_quoting.keys()}
                 context = self._create_node_context(node, manifest, extra_context)
     
                 postfix = jinja.get_rendered(
    @@ -367,8 +362,6 @@ def _compile_node(
                 )
                 # we should NOT jinja render the python model's 'raw code'
                 node.compiled_code = f"{node.raw_code}\n\n{postfix}"
    -            # restore quoting settings in the end since context is lazy evaluated
    -            self.config.quoting = original_quoting
     
             else:
                 context = self._create_node_context(node, manifest, extra_context)
    @@ -399,7 +392,7 @@ def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest):
                 elif dependency in manifest.metrics:
                     linker.dependency(node.unique_id, (manifest.metrics[dependency].unique_id))
                 else:
    -                raise GraphDependencyNotFound(node, dependency)
    +                raise GraphDependencyNotFoundError(node, dependency)
     
         def link_graph(self, linker: Linker, manifest: Manifest, add_test_edges: bool = False):
             for source in manifest.sources.values():
    @@ -482,7 +475,13 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph
     
             if write:
                 self.write_graph_file(linker, manifest)
    -        print_compile_stats(stats)
    +
+        # Do not print these for ListTasks
    +        if not (
    +            self.config.args.__class__ == argparse.Namespace
    +            and self.config.args.cls == list_task.ListTask
    +        ):
    +            print_compile_stats(stats)
     
             return Graph(linker.graph)
     
    diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py
    index 36eddfe33e0..8f7bb80fbbb 100644
    --- a/core/dbt/config/profile.py
    +++ b/core/dbt/config/profile.py
    @@ -10,12 +10,12 @@
     from dbt.contracts.connection import Credentials, HasCredentials
     from dbt.contracts.project import ProfileConfig, UserConfig
     from dbt.exceptions import (
    -    CompilationException,
    +    CompilationError,
         DbtProfileError,
         DbtProjectError,
    -    ValidationException,
    -    RuntimeException,
    -    ProfileConfigInvalid,
    +    DbtValidationError,
    +    DbtRuntimeError,
    +    ProfileConfigError,
     )
     from dbt.events.types import MissingProfileTarget
     from dbt.events.functions import fire_event
    @@ -60,9 +60,9 @@ def read_profile(profiles_dir: str) -> Dict[str, Any]:
                     msg = f"The profiles.yml file at {path} is empty"
                     raise DbtProfileError(INVALID_PROFILE_MESSAGE.format(error_string=msg))
                 return yaml_content
    -        except ValidationException as e:
    +        except DbtValidationError as e:
                 msg = INVALID_PROFILE_MESSAGE.format(error_string=e)
    -            raise ValidationException(msg) from e
    +            raise DbtValidationError(msg) from e
     
         return {}
     
    @@ -75,7 +75,7 @@ def read_user_config(directory: str) -> UserConfig:
                 if user_config is not None:
                     UserConfig.validate(user_config)
                     return UserConfig.from_dict(user_config)
    -    except (RuntimeException, ValidationError):
    +    except (DbtRuntimeError, ValidationError):
             pass
         return UserConfig()
     
    @@ -158,7 +158,7 @@ def validate(self):
                 dct = self.to_profile_info(serialize_credentials=True)
                 ProfileConfig.validate(dct)
             except ValidationError as exc:
    -            raise ProfileConfigInvalid(exc) from exc
    +            raise ProfileConfigError(exc) from exc
     
         @staticmethod
         def _credentials_from_profile(
    @@ -182,8 +182,8 @@ def _credentials_from_profile(
                 data = cls.translate_aliases(profile)
                 cls.validate(data)
                 credentials = cls.from_dict(data)
    -        except (RuntimeException, ValidationError) as e:
    -            msg = str(e) if isinstance(e, RuntimeException) else e.message
    +        except (DbtRuntimeError, ValidationError) as e:
    +            msg = str(e) if isinstance(e, DbtRuntimeError) else e.message
                 raise DbtProfileError(
                     'Credentials in profile "{}", target "{}" invalid: {}'.format(
                         profile_name, target_name, msg
    @@ -299,7 +299,7 @@ def render_profile(
     
             try:
                 profile_data = renderer.render_data(raw_profile_data)
    -        except CompilationException as exc:
    +        except CompilationError as exc:
                 raise DbtProfileError(str(exc)) from exc
             return target_name, profile_data
     
    diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py
    index ebbe2684d22..3391578fb55 100644
    --- a/core/dbt/config/project.py
    +++ b/core/dbt/config/project.py
    @@ -21,10 +21,10 @@
     from dbt.contracts.connection import QueryComment
     from dbt.exceptions import (
         DbtProjectError,
    -    SemverException,
    -    ProjectContractBroken,
    -    ProjectContractInvalid,
    -    RuntimeException,
    +    SemverError,
    +    ProjectContractBrokenError,
    +    ProjectContractError,
    +    DbtRuntimeError,
     )
     from dbt.graph import SelectionSpec
     from dbt.helper_types import NoValue
    @@ -75,6 +75,11 @@
     {error}
     """
     
    +MISSING_DBT_PROJECT_ERROR = """\
    +No dbt_project.yml found at expected path {path}
+Verify that each entry within packages.yml (and its transitive dependencies) contains a file named dbt_project.yml
    +"""
    +
     
     @runtime_checkable
     class IsFQNResource(Protocol):
    @@ -163,9 +168,7 @@ def load_raw_project(project_root: str) -> Dict[str, Any]:
     
         # get the project.yml contents
         if not path_exists(project_yaml_filepath):
    -        raise DbtProjectError(
    -            "no dbt_project.yml found at expected path {}".format(project_yaml_filepath)
    -        )
    +        raise DbtProjectError(MISSING_DBT_PROJECT_ERROR.format(path=project_yaml_filepath))
     
         project_dict = _load_yaml(project_yaml_filepath)
     
    @@ -219,7 +222,7 @@ def _get_required_version(
     
         try:
             dbt_version = _parse_versions(dbt_raw_version)
    -    except SemverException as e:
    +    except SemverError as e:
             raise DbtProjectError(str(e)) from e
     
         if verify_version:
    @@ -332,7 +335,7 @@ def create_project(self, rendered: RenderComponents) -> "Project":
                 ProjectContract.validate(rendered.project_dict)
                 cfg = ProjectContract.from_dict(rendered.project_dict)
             except ValidationError as e:
    -            raise ProjectContractInvalid(e) from e
    +            raise ProjectContractError(e) from e
             # name/version are required in the Project definition, so we can assume
             # they are present
             name = cfg.name
    @@ -649,7 +652,14 @@ def validate(self):
             try:
                 ProjectContract.validate(self.to_project_config())
             except ValidationError as e:
    -            raise ProjectContractBroken(e) from e
    +            raise ProjectContractBrokenError(e) from e
    +
    +    @classmethod
    +    def partial_load(cls, project_root: str, *, verify_version: bool = False) -> PartialProject:
    +        return PartialProject.from_project_root(
    +            project_root,
    +            verify_version=verify_version,
    +        )
     
         @classmethod
         def from_project_root(
    @@ -667,7 +677,7 @@ def hashed_name(self):
     
         def get_selector(self, name: str) -> Union[SelectionSpec, bool]:
             if name not in self.selectors:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f"Could not find selector named {name}, expected one of {list(self.selectors)}"
                 )
             return self.selectors[name]["definition"]
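
A hedged usage sketch of the new Project.partial_load classmethod added above; the path is a placeholder for a directory containing dbt_project.yml, and the intent (per PartialProject) is to stop before rendering so callers can work with the unrendered project first:

    from dbt.config.project import Project

    partial = Project.partial_load("/path/to/dbt/project", verify_version=False)
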
    diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py
    index 0a5be710ec4..3de826d4d54 100644
    --- a/core/dbt/config/renderer.py
    +++ b/core/dbt/config/renderer.py
    @@ -8,7 +8,7 @@
     from dbt.context.secret import SecretContext, SECRET_PLACEHOLDER
     from dbt.context.base import BaseContext
     from dbt.contracts.connection import HasCredentials
    -from dbt.exceptions import DbtProjectError, CompilationException, RecursionException
    +from dbt.exceptions import DbtProjectError, CompilationError, RecursionError
     from dbt.utils import deep_map_render
     
     
    @@ -40,14 +40,14 @@ def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any:
             try:
                 with catch_jinja():
                     return get_rendered(value, self.context, native=True)
    -        except CompilationException as exc:
    +        except CompilationError as exc:
                 msg = f"Could not render {value}: {exc.msg}"
    -            raise CompilationException(msg) from exc
    +            raise CompilationError(msg) from exc
     
         def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
             try:
                 return deep_map_render(self.render_entry, data)
    -        except RecursionException:
    +        except RecursionError:
                 raise DbtProjectError(
                     f"Cycle detected: {self.name} input has a reference to itself", project=data
                 )
    @@ -159,7 +159,8 @@ def should_render_keypath(self, keypath: Keypath) -> bool:
             if first in {"seeds", "models", "snapshots", "tests"}:
                 keypath_parts = {(k.lstrip("+ ") if isinstance(k, str) else k) for k in keypath}
                 # model-level hooks
    -            if "pre-hook" in keypath_parts or "post-hook" in keypath_parts:
    +            late_rendered_hooks = {"pre-hook", "post-hook", "pre_hook", "post_hook"}
    +            if keypath_parts.intersection(late_rendered_hooks):
                     return False
     
             return True
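
The hook check now matches both spellings, so pre_hook/post_hook configs written with underscores also skip eager rendering. A standalone illustration of the membership test:

    late_rendered_hooks = {"pre-hook", "post-hook", "pre_hook", "post_hook"}
    keypath_parts = {"models", "my_project", "pre_hook"}
    # False -> this keypath is not rendered here; the hook renders later, at run time
    print(not keypath_parts.intersection(late_rendered_hooks))
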
    diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py
    index 19806087a20..d019f2d6f8d 100644
    --- a/core/dbt/config/runtime.py
    +++ b/core/dbt/config/runtime.py
    @@ -26,11 +26,11 @@
     from dbt.events.functions import warn_or_error
     from dbt.events.types import UnusedResourceConfigPath
     from dbt.exceptions import (
    -    ConfigContractBroken,
    +    ConfigContractBrokenError,
         DbtProjectError,
    -    NonUniquePackageName,
    -    RuntimeException,
    -    UninstalledPackagesFound,
    +    NonUniquePackageNameError,
    +    DbtRuntimeError,
    +    UninstalledPackagesFoundError,
     )
     from dbt.helper_types import DictDefaultEmptyStr, FQNPath, PathSet
     from .profile import Profile
    @@ -235,7 +235,7 @@ def validate(self):
             try:
                 Configuration.validate(self.serialize())
             except ValidationError as e:
    -            raise ConfigContractBroken(e) from e
    +            raise ConfigContractBrokenError(e) from e
     
         @classmethod
         def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]:
    @@ -260,7 +260,7 @@ def from_args(cls, args: Any) -> "RuntimeConfig":
             :param args: The arguments as parsed from the cli.
             :raises DbtProjectError: If the project is invalid or missing.
             :raises DbtProfileError: If the profile is invalid or missing.
    -        :raises ValidationException: If the cli variables are invalid.
    +        :raises DbtValidationError: If the cli variables are invalid.
             """
             project, profile = cls.collect_parts(args)
     
    @@ -355,7 +355,7 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]:
                     count_packages_specified = len(self.packages.packages)  # type: ignore
                     count_packages_installed = len(tuple(self._get_project_directories()))
                     if count_packages_specified > count_packages_installed:
    -                    raise UninstalledPackagesFound(
    +                    raise UninstalledPackagesFoundError(
                             count_packages_specified,
                             count_packages_installed,
                             self.packages_install_path,
    @@ -363,7 +363,7 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]:
                     project_paths = itertools.chain(internal_packages, self._get_project_directories())
                 for project_name, project in self.load_projects(project_paths):
                     if project_name in all_projects:
    -                    raise NonUniquePackageName(project_name)
    +                    raise NonUniquePackageNameError(project_name)
                     all_projects[project_name] = project
                 self.dependencies = all_projects
             return self.dependencies
    @@ -428,7 +428,7 @@ def to_target_dict(self):
     
         def __getattribute__(self, name):
             if name in {"profile_name", "target_name", "threads"}:
    -            raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!')
    +            raise DbtRuntimeError(f'Error: disallowed attribute "{name}" - no profile!')
     
             return Profile.__getattribute__(self, name)
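
The __getattribute__ guard above turns access to profile-only attributes into a DbtRuntimeError on the profile-less config object. The pattern in isolation (illustration only, not dbt's class):

    class NoProfileGuard:
        def __getattribute__(self, name):
            if name in {"profile_name", "target_name", "threads"}:
                raise RuntimeError(f'Error: disallowed attribute "{name}" - no profile!')
            return object.__getattribute__(self, name)

    # NoProfileGuard().threads  -> raises RuntimeError
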
     
    diff --git a/core/dbt/config/selectors.py b/core/dbt/config/selectors.py
    index 193a1bb70a8..e26ee01d316 100644
    --- a/core/dbt/config/selectors.py
    +++ b/core/dbt/config/selectors.py
    @@ -12,7 +12,7 @@
         resolve_path_from_base,
     )
     from dbt.contracts.selection import SelectorFile
    -from dbt.exceptions import DbtSelectorsError, RuntimeException
    +from dbt.exceptions import DbtSelectorsError, DbtRuntimeError
     from dbt.graph import parse_from_selectors_definition, SelectionSpec
     from dbt.graph.selector_spec import SelectionCriteria
     
    @@ -46,7 +46,7 @@ def selectors_from_dict(cls, data: Dict[str, Any]) -> "SelectorConfig":
                     f"yaml-selectors",
                     result_type="invalid_selector",
                 ) from exc
    -        except RuntimeException as exc:
    +        except DbtRuntimeError as exc:
                 raise DbtSelectorsError(
                     f"Could not read selector file data: {exc}",
                     result_type="invalid_selector",
    @@ -62,7 +62,7 @@ def render_from_dict(
         ) -> "SelectorConfig":
             try:
                 rendered = renderer.render_data(data)
    -        except (ValidationError, RuntimeException) as exc:
    +        except (ValidationError, DbtRuntimeError) as exc:
                 raise DbtSelectorsError(
                     f"Could not render selector data: {exc}",
                     result_type="invalid_selector",
    @@ -77,7 +77,7 @@ def from_path(
         ) -> "SelectorConfig":
             try:
                 data = load_yaml_text(load_file_contents(str(path)))
    -        except (ValidationError, RuntimeException) as exc:
    +        except (ValidationError, DbtRuntimeError) as exc:
                 raise DbtSelectorsError(
                     f"Could not read selector file: {exc}",
                     result_type="invalid_selector",
    diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py
    index 76fd8f6b466..f1deccfb92c 100644
    --- a/core/dbt/config/utils.py
    +++ b/core/dbt/config/utils.py
    @@ -8,20 +8,24 @@
     from dbt.config import Profile, Project, read_user_config
     from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer
     from dbt.events.functions import fire_event
    -from dbt.events.types import InvalidVarsYAML
    -from dbt.exceptions import ValidationException, VarsArgNotYamlDict
    +from dbt.events.types import InvalidOptionYAML
    +from dbt.exceptions import DbtValidationError, OptionNotYamlDictError
     
     
    -def parse_cli_vars(var: str) -> Dict[str, Any]:
    +def parse_cli_vars(var_string: str) -> Dict[str, Any]:
    +    return parse_cli_yaml_string(var_string, "vars")
    +
    +
    +def parse_cli_yaml_string(var_string: str, cli_option_name: str) -> Dict[str, Any]:
         try:
    -        cli_vars = yaml_helper.load_yaml_text(var)
    +        cli_vars = yaml_helper.load_yaml_text(var_string)
             var_type = type(cli_vars)
             if var_type is dict:
                 return cli_vars
             else:
    -            raise VarsArgNotYamlDict(var_type)
    -    except ValidationException:
    -        fire_event(InvalidVarsYAML())
    +            raise OptionNotYamlDictError(var_type, cli_option_name)
    +    except DbtValidationError:
    +        fire_event(InvalidOptionYAML(option_name=cli_option_name))
             raise
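
parse_cli_vars is now a thin wrapper over parse_cli_yaml_string, so other YAML-valued options (such as --warn-error-options) reuse the same parsing and error reporting. A minimal sketch:

    from dbt.config.utils import parse_cli_yaml_string
    from dbt.exceptions import OptionNotYamlDictError

    print(parse_cli_yaml_string('{"include": "all"}', "warn-error-options"))  # {'include': 'all'}
    try:
        parse_cli_yaml_string("[1, 2, 3]", "warn-error-options")  # valid YAML, but not a dict
    except OptionNotYamlDictError as err:
        print(err)
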
     
     
    diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py
    index 59984cb96ab..edf0895fe31 100644
    --- a/core/dbt/context/base.py
    +++ b/core/dbt/context/base.py
    @@ -10,12 +10,12 @@
     from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER
     from dbt.contracts.graph.nodes import Resource
     from dbt.exceptions import (
    -    DisallowSecretEnvVar,
    -    EnvVarMissing,
    +    SecretEnvVarLocationError,
    +    EnvVarMissingError,
         MacroReturn,
    -    RequiredVarNotFound,
    -    SetStrictWrongType,
    -    ZipStrictWrongType,
    +    RequiredVarNotFoundError,
    +    SetStrictWrongTypeError,
    +    ZipStrictWrongTypeError,
     )
     from dbt.events.functions import fire_event, get_invocation_id
     from dbt.events.types import JinjaLogInfo, JinjaLogDebug
    @@ -153,7 +153,7 @@ def node_name(self):
                 return ""
     
         def get_missing_var(self, var_name):
    -        raise RequiredVarNotFound(var_name, self._merged, self._node)
    +        raise RequiredVarNotFoundError(var_name, self._merged, self._node)
     
         def has_var(self, var_name: str):
             return var_name in self._merged
    @@ -297,7 +297,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
             """
             return_value = None
             if var.startswith(SECRET_ENV_PREFIX):
    -            raise DisallowSecretEnvVar(var)
    +            raise SecretEnvVarLocationError(var)
             if var in os.environ:
                 return_value = os.environ[var]
             elif default is not None:
    @@ -312,7 +312,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
     
                 return return_value
             else:
    -            raise EnvVarMissing(var)
    +            raise EnvVarMissingError(var)
     
         if os.environ.get("DBT_MACRO_DEBUGGING"):
     
    @@ -493,7 +493,7 @@ def set_strict(value: Iterable[Any]) -> Set[Any]:
             try:
                 return set(value)
             except TypeError as e:
    -            raise SetStrictWrongType(e)
    +            raise SetStrictWrongTypeError(e)
     
         @contextmember("zip")
         @staticmethod
    @@ -537,7 +537,7 @@ def zip_strict(*args: Iterable[Any]) -> Iterable[Any]:
             try:
                 return zip(*args)
             except TypeError as e:
    -            raise ZipStrictWrongType(e)
    +            raise ZipStrictWrongTypeError(e)
     
         @contextmember
         @staticmethod
    @@ -634,9 +634,8 @@ def flags(self) -> Any:
                 {% endif %}
     
             This supports all flags defined in flags submodule (core/dbt/flags.py)
    -        TODO: Replace with object that provides read-only access to flag values
             """
    -        return flags
    +        return flags.get_flag_obj()
     
         @contextmember
         @staticmethod
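
Jinja's flags context member now resolves through flags.get_flag_obj() instead of exposing the mutable module. A hedged sketch; the accessor exists per the change above, but the attribute names are assumed to mirror the module-level globals in core/dbt/flags.py:

    from dbt import flags

    flag_obj = flags.get_flag_obj()
    print(flag_obj.WHICH)  # attribute name assumed; e.g. 'run', or None before flags are set
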
    diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py
    index 7339bdb1152..0ba5ce7ca3d 100644
    --- a/core/dbt/context/configured.py
    +++ b/core/dbt/context/configured.py
    @@ -8,7 +8,7 @@
     
     from dbt.context.base import contextproperty, contextmember, Var
     from dbt.context.target import TargetContext
    -from dbt.exceptions import EnvVarMissing, DisallowSecretEnvVar
    +from dbt.exceptions import EnvVarMissingError, SecretEnvVarLocationError
     
     
     class ConfiguredContext(TargetContext):
    @@ -87,7 +87,7 @@ def var(self) -> ConfiguredVar:
         def env_var(self, var: str, default: Optional[str] = None) -> str:
             return_value = None
             if var.startswith(SECRET_ENV_PREFIX):
    -            raise DisallowSecretEnvVar(var)
    +            raise SecretEnvVarLocationError(var)
             if var in os.environ:
                 return_value = os.environ[var]
             elif default is not None:
    @@ -105,7 +105,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
     
                 return return_value
             else:
    -            raise EnvVarMissing(var)
    +            raise EnvVarMissingError(var)
     
     
     class MacroResolvingContext(ConfiguredContext):
    diff --git a/core/dbt/context/context_config.py b/core/dbt/context/context_config.py
    index 2b0aafe7189..b497887ab45 100644
    --- a/core/dbt/context/context_config.py
    +++ b/core/dbt/context/context_config.py
    @@ -5,7 +5,7 @@
     
     from dbt.config import RuntimeConfig, Project, IsFQNResource
     from dbt.contracts.graph.model_config import BaseConfig, get_config_for, _listify
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.node_types import NodeType
     from dbt.utils import fqn_search
     
    @@ -89,7 +89,7 @@ def get_node_project(self, project_name: str):
                 return self._active_project
             dependencies = self._active_project.load_dependencies()
             if project_name not in dependencies:
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"Project name {project_name} not found in dependencies "
                     f"(found {list(dependencies)})"
                 )
    @@ -287,14 +287,14 @@ def _add_config_call(cls, config_call_dict, opts: Dict[str, Any]) -> None:
     
                 elif k in BaseConfig.mergebehavior["update"]:
                     if not isinstance(v, dict):
    -                    raise InternalException(f"expected dict, got {v}")
    +                    raise DbtInternalError(f"expected dict, got {v}")
                     if k in config_call_dict and isinstance(config_call_dict[k], dict):
                         config_call_dict[k].update(v)
                     else:
                         config_call_dict[k] = v
                 elif k in BaseConfig.mergebehavior["dict_key_append"]:
                     if not isinstance(v, dict):
    -                    raise InternalException(f"expected dict, got {v}")
    +                    raise DbtInternalError(f"expected dict, got {v}")
                     if k in config_call_dict:  # should always be a dict
                         for key, value in v.items():
                             extend = False
    diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py
    index 89a652736dd..3d5abf42e11 100644
    --- a/core/dbt/context/docs.py
    +++ b/core/dbt/context/docs.py
    @@ -1,8 +1,8 @@
     from typing import Any, Dict, Union
     
     from dbt.exceptions import (
    -    DocTargetNotFound,
    -    InvalidDocArgs,
    +    DocTargetNotFoundError,
    +    DocArgsError,
     )
     from dbt.config.runtime import RuntimeConfig
     from dbt.contracts.graph.manifest import Manifest
    @@ -52,7 +52,7 @@ def doc(self, *args: str) -> str:
             elif len(args) == 2:
                 doc_package_name, doc_name = args
             else:
    -            raise InvalidDocArgs(self.node, args)
    +            raise DocArgsError(self.node, args)
     
             # Documentation
             target_doc = self.manifest.resolve_doc(
    @@ -68,7 +68,7 @@ def doc(self, *args: str) -> str:
                     # TODO CT-211
                     source_file.add_node(self.node.unique_id)  # type: ignore[union-attr]
             else:
    -            raise DocTargetNotFound(
    +            raise DocTargetNotFoundError(
                     node=self.node, target_doc_name=doc_name, target_doc_package=doc_package_name
                 )
     
    diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py
    index 5663b4701e0..98f19048f1a 100644
    --- a/core/dbt/context/exceptions_jinja.py
    +++ b/core/dbt/context/exceptions_jinja.py
    @@ -6,23 +6,23 @@
     from dbt.events.types import JinjaLogWarning
     
     from dbt.exceptions import (
    -    RuntimeException,
    -    MissingConfig,
    -    MissingMaterialization,
    -    MissingRelation,
    -    AmbiguousAlias,
    -    AmbiguousCatalogMatch,
    -    CacheInconsistency,
    -    DataclassNotDict,
    -    CompilationException,
    -    DatabaseException,
    -    DependencyNotFound,
    -    DependencyException,
    -    DuplicatePatchPath,
    -    DuplicateResourceName,
    -    InvalidPropertyYML,
    -    NotImplementedException,
    -    RelationWrongType,
    +    DbtRuntimeError,
    +    MissingConfigError,
    +    MissingMaterializationError,
    +    MissingRelationError,
    +    AmbiguousAliasError,
    +    AmbiguousCatalogMatchError,
    +    CacheInconsistencyError,
    +    DataclassNotDictError,
    +    CompilationError,
    +    DbtDatabaseError,
    +    DependencyNotFoundError,
    +    DependencyError,
    +    DuplicatePatchPathError,
    +    DuplicateResourceNameError,
    +    PropertyYMLError,
    +    NotImplementedError,
    +    RelationWrongTypeError,
     )
     
     
    @@ -32,67 +32,69 @@ def warn(msg, node=None):
     
     
     def missing_config(model, name) -> NoReturn:
    -    raise MissingConfig(unique_id=model.unique_id, name=name)
    +    raise MissingConfigError(unique_id=model.unique_id, name=name)
     
     
     def missing_materialization(model, adapter_type) -> NoReturn:
    -    raise MissingMaterialization(model=model, adapter_type=adapter_type)
    +    raise MissingMaterializationError(
    +        materialization=model.config.materialized, adapter_type=adapter_type
    +    )
     
     
     def missing_relation(relation, model=None) -> NoReturn:
    -    raise MissingRelation(relation, model)
    +    raise MissingRelationError(relation, model)
     
     
     def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn:
    -    raise AmbiguousAlias(node_1, node_2, duped_name)
    +    raise AmbiguousAliasError(node_1, node_2, duped_name)
     
     
     def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn:
    -    raise AmbiguousCatalogMatch(unique_id, match_1, match_2)
    +    raise AmbiguousCatalogMatchError(unique_id, match_1, match_2)
     
     
     def raise_cache_inconsistent(message) -> NoReturn:
    -    raise CacheInconsistency(message)
    +    raise CacheInconsistencyError(message)
     
     
     def raise_dataclass_not_dict(obj) -> NoReturn:
    -    raise DataclassNotDict(obj)
    +    raise DataclassNotDictError(obj)
     
     
     def raise_compiler_error(msg, node=None) -> NoReturn:
    -    raise CompilationException(msg, node)
    +    raise CompilationError(msg, node)
     
     
     def raise_database_error(msg, node=None) -> NoReturn:
    -    raise DatabaseException(msg, node)
    +    raise DbtDatabaseError(msg, node)
     
     
     def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn:
    -    raise DependencyNotFound(node, node_description, required_pkg)
    +    raise DependencyNotFoundError(node, node_description, required_pkg)
     
     
     def raise_dependency_error(msg) -> NoReturn:
    -    raise DependencyException(scrub_secrets(msg, env_secrets()))
    +    raise DependencyError(scrub_secrets(msg, env_secrets()))
     
     
     def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn:
    -    raise DuplicatePatchPath(patch_1, existing_patch_path)
    +    raise DuplicatePatchPathError(patch_1, existing_patch_path)
     
     
     def raise_duplicate_resource_name(node_1, node_2) -> NoReturn:
    -    raise DuplicateResourceName(node_1, node_2)
    +    raise DuplicateResourceNameError(node_1, node_2)
     
     
     def raise_invalid_property_yml_version(path, issue) -> NoReturn:
    -    raise InvalidPropertyYML(path, issue)
    +    raise PropertyYMLError(path, issue)
     
     
     def raise_not_implemented(msg) -> NoReturn:
    -    raise NotImplementedException(msg)
    +    raise NotImplementedError(msg)
     
     
     def relation_wrong_type(relation, expected_type, model=None) -> NoReturn:
    -    raise RelationWrongType(relation, expected_type, model)
    +    raise RelationWrongTypeError(relation, expected_type, model)
     
     
     # Update this when a new function should be added to the
    @@ -128,7 +130,7 @@ def wrap(func):
             def inner(*args, **kwargs):
                 try:
                     return func(*args, **kwargs)
    -            except RuntimeException as exc:
    +            except DbtRuntimeError as exc:
                     exc.add_node(model)
                     raise exc
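
Note: the wrap/inner pair at the end of this file is the pattern that makes the rename matter at runtime: every Jinja-facing function is wrapped so a DbtRuntimeError raised inside it is annotated with the calling model before propagating. A condensed, self-contained sketch with a stand-in error class:

    class NodeTaggedError(Exception):
        # Stand-in for DbtRuntimeError's add_node() behavior.
        def __init__(self, msg):
            super().__init__(msg)
            self.node = None

        def add_node(self, node):
            self.node = node

    def wrap_with_node(func, model):
        def inner(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except NodeTaggedError as exc:
                exc.add_node(model)  # annotate, then re-raise unchanged
                raise exc
        return inner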
     
    diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py
    index 6e70bafd05e..20f97febcb0 100644
    --- a/core/dbt/context/macro_resolver.py
    +++ b/core/dbt/context/macro_resolver.py
    @@ -1,6 +1,6 @@
     from typing import Dict, MutableMapping, Optional
     from dbt.contracts.graph.nodes import Macro
    -from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro
    +from dbt.exceptions import DuplicateMacroNameError, PackageNotFoundForMacroError
     from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
     from dbt.clients.jinja import MacroGenerator
     
    @@ -86,7 +86,7 @@ def _add_macro_to(
                 package_namespaces[macro.package_name] = namespace
     
             if macro.name in namespace:
    -            raise DuplicateMacroName(macro, macro, macro.package_name)
    +            raise DuplicateMacroNameError(macro, macro, macro.package_name)
             package_namespaces[macro.package_name][macro.name] = macro
     
         def add_macro(self, macro: Macro):
    @@ -187,7 +187,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M
             elif package_name in self.macro_resolver.packages:
                 macro = self.macro_resolver.packages[package_name].get(name)
             else:
    -            raise PackageNotFoundForMacro(package_name)
    +            raise PackageNotFoundForMacroError(package_name)
             if not macro:
                 return None
             macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx)
    diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py
    index 921480ec05a..1c61e564e06 100644
    --- a/core/dbt/context/macros.py
    +++ b/core/dbt/context/macros.py
    @@ -3,7 +3,7 @@
     from dbt.clients.jinja import MacroGenerator, MacroStack
     from dbt.contracts.graph.nodes import Macro
     from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
    -from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro
    +from dbt.exceptions import DuplicateMacroNameError, PackageNotFoundForMacroError
     
     
     FlatNamespace = Dict[str, MacroGenerator]
    @@ -75,7 +75,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M
             elif package_name in self.packages:
                 return self.packages[package_name].get(name)
             else:
    -            raise PackageNotFoundForMacro(package_name)
    +            raise PackageNotFoundForMacroError(package_name)
     
     
     # This class builds the MacroNamespace by adding macros to
    @@ -122,7 +122,7 @@ def _add_macro_to(
                 hierarchy[macro.package_name] = namespace
     
             if macro.name in namespace:
    -            raise DuplicateMacroName(macro_func.macro, macro, macro.package_name)
    +            raise DuplicateMacroNameError(macro_func.macro, macro, macro.package_name)
             hierarchy[macro.package_name][macro.name] = macro_func
     
         def add_macro(self, macro: Macro, ctx: Dict[str, Any]):
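
Note: macros.py and macro_resolver.py enforce the same invariant here: one macro name per package namespace, with a second definition raising DuplicateMacroNameError. A stripped-down sketch of that guard using plain dicts and a builtin exception as a stand-in:

    # Stripped-down sketch of the per-package namespace guard.
    def add_macro_sketch(namespaces: dict, package: str, name: str, macro) -> None:
        namespace = namespaces.setdefault(package, {})
        if name in namespace:
            # Stand-in for DuplicateMacroNameError.
            raise KeyError(f"two macros named {name!r} in package {package!r}")
        namespace[name] = macro
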
    diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py
    index 2e7af0a79f2..fec5111e36c 100644
    --- a/core/dbt/context/providers.py
    +++ b/core/dbt/context/providers.py
    @@ -41,28 +41,28 @@
     from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference
     from dbt.events.functions import get_metadata_vars
     from dbt.exceptions import (
    -    CompilationException,
    -    ConflictingConfigKeys,
    -    DisallowSecretEnvVar,
    -    EnvVarMissing,
    -    InternalException,
    -    InvalidInlineModelConfig,
    -    InvalidNumberSourceArgs,
    -    InvalidPersistDocsValueType,
    -    LoadAgateTableNotSeed,
    +    CompilationError,
    +    ConflictingConfigKeysError,
    +    SecretEnvVarLocationError,
    +    EnvVarMissingError,
    +    DbtInternalError,
    +    InlineModelConfigError,
    +    NumberSourceArgsError,
    +    PersistDocsValueTypeError,
    +    LoadAgateTableNotSeedError,
         LoadAgateTableValueError,
    -    MacroInvalidDispatchArg,
    -    MacrosSourcesUnWriteable,
    -    MetricInvalidArgs,
    -    MissingConfig,
    -    OperationsCannotRefEphemeralNodes,
    -    PackageNotInDeps,
    -    ParsingException,
    -    RefBadContext,
    -    RefInvalidArgs,
    -    RuntimeException,
    -    TargetNotFound,
    -    ValidationException,
    +    MacroDispatchArgError,
    +    MacrosSourcesUnWriteableError,
    +    MetricArgsError,
    +    MissingConfigError,
    +    OperationsCannotRefEphemeralNodesError,
    +    PackageNotInDepsError,
    +    ParsingError,
    +    RefBadContextError,
    +    RefArgsError,
    +    DbtRuntimeError,
    +    TargetNotFoundError,
    +    DbtValidationError,
     )
     from dbt.config import IsFQNResource
     from dbt.node_types import NodeType, ModelLanguage
    @@ -144,10 +144,10 @@ def dispatch(
                     f'`adapter.dispatch("{suggest_macro_name}", '
                     f'macro_namespace="{suggest_macro_namespace}")`?'
                 )
    -            raise CompilationException(msg)
    +            raise CompilationError(msg)
     
             if packages is not None:
    -            raise MacroInvalidDispatchArg(macro_name)
    +            raise MacroDispatchArgError(macro_name)
     
             namespace = macro_namespace
     
    @@ -159,7 +159,7 @@ def dispatch(
                     search_packages = [self.config.project_name, namespace]
             else:
                 # Not a string and not None so must be a list
    -            raise CompilationException(
    +            raise CompilationError(
                     f"In adapter.dispatch, got a list macro_namespace argument "
                     f'("{macro_namespace}"), but macro_namespace should be None or a string.'
                 )
    @@ -172,8 +172,8 @@ def dispatch(
                     try:
                         # this uses the namespace from the context
                         macro = self._namespace.get_from_package(package_name, search_name)
    -                except CompilationException:
    -                    # Only raise CompilationException if macro is not found in
    +                except CompilationError:
    +                    # Only raise CompilationError if macro is not found in
                         # any package
                         macro = None
     
    @@ -187,7 +187,7 @@ def dispatch(
     
             searched = ", ".join(repr(a) for a in attempts)
             msg = f"In dispatch: No macro named '{macro_name}' found\n    Searched for: {searched}"
    -        raise CompilationException(msg)
    +        raise CompilationError(msg)
     
     
     class BaseResolver(metaclass=abc.ABCMeta):
    @@ -223,12 +223,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]:
     
         def validate_args(self, name: str, package: Optional[str]):
             if not isinstance(name, str):
    -            raise CompilationException(
    +            raise CompilationError(
                     f"The name argument to ref() must be a string, got {type(name)}"
                 )
     
             if package is not None and not isinstance(package, str):
    -            raise CompilationException(
    +            raise CompilationError(
                     f"The package argument to ref() must be a string or None, got {type(package)}"
                 )
     
    @@ -241,7 +241,7 @@ def __call__(self, *args: str) -> RelationProxy:
             elif len(args) == 2:
                 package, name = args
             else:
    -            raise RefInvalidArgs(node=self.model, args=args)
    +            raise RefArgsError(node=self.model, args=args)
             self.validate_args(name, package)
             return self.resolve(name, package)
     
    @@ -253,19 +253,19 @@ def resolve(self, source_name: str, table_name: str):
     
         def validate_args(self, source_name: str, table_name: str):
             if not isinstance(source_name, str):
    -            raise CompilationException(
    +            raise CompilationError(
                     f"The source name (first) argument to source() must be a "
                     f"string, got {type(source_name)}"
                 )
             if not isinstance(table_name, str):
    -            raise CompilationException(
    +            raise CompilationError(
                     f"The table name (second) argument to source() must be a "
                     f"string, got {type(table_name)}"
                 )
     
         def __call__(self, *args: str) -> RelationProxy:
             if len(args) != 2:
    -            raise InvalidNumberSourceArgs(args, node=self.model)
    +            raise NumberSourceArgsError(args, node=self.model)
             self.validate_args(args[0], args[1])
             return self.resolve(args[0], args[1])
     
    @@ -282,12 +282,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]:
     
         def validate_args(self, name: str, package: Optional[str]):
             if not isinstance(name, str):
    -            raise CompilationException(
    +            raise CompilationError(
                     f"The name argument to metric() must be a string, got {type(name)}"
                 )
     
             if package is not None and not isinstance(package, str):
    -            raise CompilationException(
    +            raise CompilationError(
                     f"The package argument to metric() must be a string or None, got {type(package)}"
                 )
     
    @@ -300,7 +300,7 @@ def __call__(self, *args: str) -> MetricReference:
             elif len(args) == 2:
                 package, name = args
             else:
    -            raise MetricInvalidArgs(node=self.model, args=args)
    +            raise MetricArgsError(node=self.model, args=args)
             self.validate_args(name, package)
             return self.resolve(name, package)
     
    @@ -321,7 +321,7 @@ def _transform_config(self, config):
                 if oldkey in config:
                     newkey = oldkey.replace("_", "-")
                     if newkey in config:
    -                    raise ConflictingConfigKeys(oldkey, newkey, node=self.model)
    +                    raise ConflictingConfigKeysError(oldkey, newkey, node=self.model)
                     config[newkey] = config.pop(oldkey)
             return config
     
    @@ -331,14 +331,14 @@ def __call__(self, *args, **kwargs):
             elif len(args) == 0 and len(kwargs) > 0:
                 opts = kwargs
             else:
    -            raise InvalidInlineModelConfig(node=self.model)
    +            raise InlineModelConfigError(node=self.model)
     
             opts = self._transform_config(opts)
     
             # it's ok to have a parse context with no context config, but you must
             # not call it!
             if self.context_config is None:
    -            raise RuntimeException("At parse time, did not receive a context config")
    +            raise DbtRuntimeError("At parse time, did not receive a context config")
             self.context_config.add_config_call(opts)
             return ""
     
    @@ -379,7 +379,7 @@ def _lookup(self, name, default=_MISSING):
             else:
                 result = self.model.config.get(name, default)
             if result is _MISSING:
    -            raise MissingConfig(unique_id=self.model.unique_id, name=name)
    +            raise MissingConfigError(unique_id=self.model.unique_id, name=name)
             return result
     
         def require(self, name, validator=None):
    @@ -401,14 +401,14 @@ def get(self, name, default=None, validator=None):
         def persist_relation_docs(self) -> bool:
             persist_docs = self.get("persist_docs", default={})
             if not isinstance(persist_docs, dict):
    -            raise InvalidPersistDocsValueType(persist_docs)
    +            raise PersistDocsValueTypeError(persist_docs)
     
             return persist_docs.get("relation", False)
     
         def persist_column_docs(self) -> bool:
             persist_docs = self.get("persist_docs", default={})
             if not isinstance(persist_docs, dict):
    -            raise InvalidPersistDocsValueType(persist_docs)
    +            raise PersistDocsValueTypeError(persist_docs)
     
             return persist_docs.get("columns", False)
     
    @@ -467,7 +467,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel
             )
     
             if target_model is None or isinstance(target_model, Disabled):
    -            raise TargetNotFound(
    +            raise TargetNotFoundError(
                     node=self.model,
                     target_name=target_name,
                     target_kind="node",
    @@ -489,7 +489,7 @@ def validate(
         ) -> None:
             if resolved.unique_id not in self.model.depends_on.nodes:
                 args = self._repack_args(target_name, target_package)
    -            raise RefBadContext(node=self.model, args=args)
    +            raise RefBadContextError(node=self.model, args=args)
     
     
     class OperationRefResolver(RuntimeRefResolver):
    @@ -505,7 +505,7 @@ def create_relation(self, target_model: ManifestNode, name: str) -> RelationProx
             if target_model.is_ephemeral_model:
                 # In operations, we can't ref() ephemeral nodes, because
                 # Macros do not support set_cte
    -            raise OperationsCannotRefEphemeralNodes(target_model.name, node=self.model)
    +            raise OperationsCannotRefEphemeralNodesError(target_model.name, node=self.model)
             else:
                 return super().create_relation(target_model, name)
     
    @@ -528,7 +528,7 @@ def resolve(self, source_name: str, table_name: str):
             )
     
             if target_source is None or isinstance(target_source, Disabled):
    -            raise TargetNotFound(
    +            raise TargetNotFoundError(
                     node=self.model,
                     target_name=f"{source_name}.{table_name}",
                     target_kind="source",
    @@ -555,7 +555,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Met
             )
     
             if target_metric is None or isinstance(target_metric, Disabled):
    -            raise TargetNotFound(
    +            raise TargetNotFoundError(
                     node=self.model,
                     target_name=target_name,
                     target_kind="metric",
    @@ -584,7 +584,7 @@ def packages_for_node(self) -> Iterable[Project]:
             if package_name != self._config.project_name:
                 if package_name not in dependencies:
                     # I don't think this is actually reachable
    -                raise PackageNotInDeps(package_name, node=self._node)
    +                raise PackageNotInDepsError(package_name, node=self._node)
                 yield dependencies[package_name]
             yield self._config
     
    @@ -674,7 +674,7 @@ def __init__(
             context_config: Optional[ContextConfig],
         ) -> None:
             if provider is None:
    -            raise InternalException(f"Invalid provider given to context: {provider}")
    +            raise DbtInternalError(f"Invalid provider given to context: {provider}")
             # mypy appeasement - we know it'll be a RuntimeConfig
             self.config: RuntimeConfig
             self.model: Union[Macro, ManifestNode] = model
    @@ -751,7 +751,7 @@ def inner(value: T) -> None:
                             return
                         elif value == arg:
                             return
    -                raise ValidationException(
    +                raise DbtValidationError(
                         'Expected value "{}" to be one of {}'.format(value, ",".join(map(str, args)))
                     )
     
    @@ -767,7 +767,7 @@ def inner(value: T) -> None:
         def write(self, payload: str) -> str:
             # macros/source defs aren't 'writeable'.
             if isinstance(self.model, (Macro, SourceDefinition)):
    -            raise MacrosSourcesUnWriteable(node=self.model)
    +            raise MacrosSourcesUnWriteableError(node=self.model)
             self.model.build_path = self.model.write_node(self.config.target_path, "run", payload)
             return ""
     
    @@ -782,12 +782,12 @@ def try_or_compiler_error(
             try:
                 return func(*args, **kwargs)
             except Exception:
    -            raise CompilationException(message_if_exception, self.model)
    +            raise CompilationError(message_if_exception, self.model)
     
         @contextmember
         def load_agate_table(self) -> agate.Table:
             if not isinstance(self.model, SeedNode):
    -            raise LoadAgateTableNotSeed(self.model.resource_type, node=self.model)
    +            raise LoadAgateTableNotSeedError(self.model.resource_type, node=self.model)
             assert self.model.root_path
             path = os.path.join(self.model.root_path, self.model.original_file_path)
             column_types = self.model.config.column_types
    @@ -1185,7 +1185,7 @@ def adapter_macro(self, name: str, *args, **kwargs):
                 "https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch)"
                 " adapter_macro was called for: {macro_name}".format(macro_name=name)
             )
    -        raise CompilationException(msg)
    +        raise CompilationError(msg)
     
         @contextmember
         def env_var(self, var: str, default: Optional[str] = None) -> str:
    @@ -1196,7 +1196,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
             """
             return_value = None
             if var.startswith(SECRET_ENV_PREFIX):
    -            raise DisallowSecretEnvVar(var)
    +            raise SecretEnvVarLocationError(var)
             if var in os.environ:
                 return_value = os.environ[var]
             elif default is not None:
    @@ -1229,7 +1229,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
                             source_file.env_vars.append(var)  # type: ignore[union-attr]
                 return return_value
             else:
    -            raise EnvVarMissing(var)
    +            raise EnvVarMissingError(var)
     
         @contextproperty
         def selected_resources(self) -> List[str]:
    @@ -1248,7 +1248,7 @@ def submit_python_job(self, parsed_model: Dict, compiled_code: str) -> AdapterRe
                 and self.context_macro_stack.call_stack[1] == "macro.dbt.statement"
                 and "materialization" in self.context_macro_stack.call_stack[0]
             ):
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f"submit_python_job is not intended to be called here, at model {parsed_model['alias']}, with macro call_stack {self.context_macro_stack.call_stack}."
                 )
             return self.adapter.submit_python_job(parsed_model, compiled_code)
    @@ -1410,7 +1410,7 @@ def generate_runtime_macro_context(
     class ExposureRefResolver(BaseResolver):
         def __call__(self, *args) -> str:
             if len(args) not in (1, 2):
    -            raise RefInvalidArgs(node=self.model, args=args)
    +            raise RefArgsError(node=self.model, args=args)
             self.model.refs.append(list(args))
             return ""
     
    @@ -1418,7 +1418,7 @@ def __call__(self, *args) -> str:
     class ExposureSourceResolver(BaseResolver):
         def __call__(self, *args) -> str:
             if len(args) != 2:
    -            raise InvalidNumberSourceArgs(args, node=self.model)
    +            raise NumberSourceArgsError(args, node=self.model)
             self.model.sources.append(list(args))
             return ""
     
    @@ -1426,7 +1426,7 @@ def __call__(self, *args) -> str:
     class ExposureMetricResolver(BaseResolver):
         def __call__(self, *args) -> str:
             if len(args) not in (1, 2):
    -            raise MetricInvalidArgs(node=self.model, args=args)
    +            raise MetricArgsError(node=self.model, args=args)
             self.model.metrics.append(list(args))
             return ""
     
    @@ -1468,14 +1468,14 @@ def __call__(self, *args) -> str:
             elif len(args) == 2:
                 package, name = args
             else:
    -            raise RefInvalidArgs(node=self.model, args=args)
    +            raise RefArgsError(node=self.model, args=args)
             self.validate_args(name, package)
             self.model.refs.append(list(args))
             return ""
     
         def validate_args(self, name, package):
             if not isinstance(name, str):
    -            raise ParsingException(
    +            raise ParsingError(
                     f"In a metrics section in {self.model.original_file_path} "
                     "the name argument to ref() must be a string"
                 )
    @@ -1558,7 +1558,7 @@ def _build_test_namespace(self):
         def env_var(self, var: str, default: Optional[str] = None) -> str:
             return_value = None
             if var.startswith(SECRET_ENV_PREFIX):
    -            raise DisallowSecretEnvVar(var)
    +            raise SecretEnvVarLocationError(var)
             if var in os.environ:
                 return_value = os.environ[var]
             elif default is not None:
    @@ -1584,7 +1584,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
                         source_file.add_env_var(var, yaml_key, name)  # type: ignore[union-attr]
                 return return_value
             else:
    -            raise EnvVarMissing(var)
    +            raise EnvVarMissingError(var)
     
     
     def generate_test_context(
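
Note: several hunks in this file tighten adapter.dispatch's macro_namespace handling: a string searches the root project and then the named package, and any non-string, non-None value now raises CompilationError. A sketch of the search-order rule; the None branch is outside the hunk and is marked as an assumption:

    from typing import List, Optional

    def search_packages_sketch(project_name: str, macro_namespace) -> List[Optional[str]]:
        if macro_namespace is None:
            return [None]  # assumption: unqualified search; branch not shown above
        if isinstance(macro_namespace, str):
            return [project_name, macro_namespace]
        raise TypeError(
            "In adapter.dispatch, macro_namespace should be None or a string."
        )
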
    diff --git a/core/dbt/context/secret.py b/core/dbt/context/secret.py
    index da13509ef50..4d8ff342aff 100644
    --- a/core/dbt/context/secret.py
    +++ b/core/dbt/context/secret.py
    @@ -4,7 +4,7 @@
     from .base import BaseContext, contextmember
     
     from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER
    -from dbt.exceptions import EnvVarMissing
    +from dbt.exceptions import EnvVarMissingError
     
     
     SECRET_PLACEHOLDER = "$$$DBT_SECRET_START$$${}$$$DBT_SECRET_END$$$"
    @@ -50,7 +50,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
                     self.env_vars[var] = return_value if var in os.environ else DEFAULT_ENV_PLACEHOLDER
                 return return_value
             else:
    -            raise EnvVarMissing(var)
    +            raise EnvVarMissingError(var)
     
     
     def generate_secret_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]:
    diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py
    index fe4ae912229..3f12a603363 100644
    --- a/core/dbt/contracts/connection.py
    +++ b/core/dbt/contracts/connection.py
    @@ -12,7 +12,7 @@
         List,
         Callable,
     )
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.utils import translate_aliases
     from dbt.events.functions import fire_event
     from dbt.events.types import NewConnectionOpening
    @@ -94,7 +94,7 @@ def handle(self):
                     # this will actually change 'self._handle'.
                     self._handle.resolve(self)
                 except RecursionError as exc:
    -                raise InternalException(
    +                raise DbtInternalError(
                         "A connection's open() method attempted to read the handle value"
                     ) from exc
             return self._handle
    diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py
    index c43012ec521..4dd2ddc2f33 100644
    --- a/core/dbt/contracts/graph/manifest.py
    +++ b/core/dbt/contracts/graph/manifest.py
    @@ -40,10 +40,10 @@
     from dbt.contracts.util import BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version
     from dbt.dataclass_schema import dbtClassMixin
     from dbt.exceptions import (
    -    CompilationException,
    -    DuplicateResourceName,
    -    DuplicateMacroInPackage,
    -    DuplicateMaterializationName,
    +    CompilationError,
    +    DuplicateResourceNameError,
    +    DuplicateMacroInPackageError,
    +    DuplicateMaterializationNameError,
     )
     from dbt.helper_types import PathSet
     from dbt.events.functions import fire_event
    @@ -102,7 +102,7 @@ def populate(self, manifest):
     
         def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation:
             if unique_id not in manifest.docs:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Doc {unique_id} found in cache but not found in manifest"
                 )
             return manifest.docs[unique_id]
    @@ -135,7 +135,7 @@ def populate(self, manifest):
     
         def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition:
             if unique_id not in manifest.sources:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Source {unique_id} found in cache but not found in manifest"
                 )
             return manifest.sources[unique_id]
    @@ -173,7 +173,7 @@ def populate(self, manifest):
     
         def perform_lookup(self, unique_id: UniqueID, manifest) -> ManifestNode:
             if unique_id not in manifest.nodes:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Node {unique_id} found in cache but not found in manifest"
                 )
             return manifest.nodes[unique_id]
    @@ -206,7 +206,7 @@ def populate(self, manifest):
     
         def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric:
             if unique_id not in manifest.metrics:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Metric {unique_id} found in cache but not found in manifest"
                 )
             return manifest.metrics[unique_id]
    @@ -398,7 +398,7 @@ def __eq__(self, other: object) -> bool:
                 return NotImplemented
             equal = self.specificity == other.specificity and self.locality == other.locality
             if equal:
    -            raise DuplicateMaterializationName(self.macro, other)
    +            raise DuplicateMaterializationNameError(self.macro, other)
     
             return equal
     
    @@ -480,13 +480,13 @@ def _update_into(dest: MutableMapping[str, T], new_item: T):
         """
         unique_id = new_item.unique_id
         if unique_id not in dest:
    -        raise dbt.exceptions.RuntimeException(
    +        raise dbt.exceptions.DbtRuntimeError(
                 f"got an update_{new_item.resource_type} call with an "
                 f"unrecognized {new_item.resource_type}: {new_item.unique_id}"
             )
         existing = dest[unique_id]
         if new_item.original_file_path != existing.original_file_path:
    -        raise dbt.exceptions.RuntimeException(
    +        raise dbt.exceptions.DbtRuntimeError(
                 f"cannot update a {new_item.resource_type} to have a new file path!"
             )
         dest[unique_id] = new_item
    @@ -839,7 +839,7 @@ def expect(self, unique_id: str) -> GraphMemberNode:
                 return self.metrics[unique_id]
             else:
                 # something terrible has happened
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     "Expected node {} not found in manifest".format(unique_id)
                 )
     
    @@ -1035,7 +1035,7 @@ def merge_from_artifact(
         def add_macro(self, source_file: SourceFile, macro: Macro):
             if macro.unique_id in self.macros:
                 # detect that the macro exists and emit an error
    -            raise DuplicateMacroInPackage(macro=macro, macro_mapping=self.macros)
    +            raise DuplicateMacroInPackageError(macro=macro, macro_mapping=self.macros)
     
             self.macros[macro.unique_id] = macro
             source_file.macros.append(macro.unique_id)
    @@ -1213,7 +1213,7 @@ def __post_serialize__(self, dct):
     
     def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]):
         if value.unique_id in src:
    -        raise DuplicateResourceName(value, src[value.unique_id])
    +        raise DuplicateResourceNameError(value, src[value.unique_id])
     
     
     K_T = TypeVar("K_T")
    @@ -1222,7 +1222,7 @@ def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]):
     
     def _expect_value(key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str) -> V_T:
         if key not in src:
    -        raise CompilationException(
    +        raise CompilationError(
                 'Expected to find "{}" in cached "result.{}" based '
                 "on cached file information: {}!".format(key, name, old_file)
             )
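
Note: the four lookup classes changed above all share one contract: a name-to-unique_id cache backed by the manifest, where a cached id missing from the manifest is an internal error rather than a cache miss. A condensed sketch of the pattern, with RuntimeError standing in for dbt.exceptions.DbtInternalError:

    class LookupSketch:
        def __init__(self):
            self.storage = {}  # name -> unique_id

        def perform_lookup_sketch(self, name, store: dict):
            unique_id = self.storage.get(name)
            if unique_id is None:
                return None
            if unique_id not in store:
                # Cache says it exists, manifest disagrees: internal bug.
                raise RuntimeError(f"{unique_id} found in cache but not in manifest")
            return store[unique_id]
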
    diff --git a/core/dbt/contracts/graph/model_config.py b/core/dbt/contracts/graph/model_config.py
    index b22f724de53..407c5435786 100644
    --- a/core/dbt/contracts/graph/model_config.py
    +++ b/core/dbt/contracts/graph/model_config.py
    @@ -9,7 +9,7 @@
     )
     from dbt.contracts.graph.unparsed import AdditionalPropertiesAllowed, Docs
     from dbt.contracts.graph.utils import validate_color
    -from dbt.exceptions import InternalException, CompilationException
    +from dbt.exceptions import DbtInternalError, CompilationError
     from dbt.contracts.util import Replaceable, list_str
     from dbt import hooks
     from dbt.node_types import NodeType
    @@ -30,7 +30,7 @@ def _get_meta_value(cls: Type[M], fld: Field, key: str, default: Any) -> M:
         try:
             return cls(value)
         except ValueError as exc:
    -        raise InternalException(f"Invalid {cls} value: {value}") from exc
    +        raise DbtInternalError(f"Invalid {cls} value: {value}") from exc
     
     
     def _set_meta_value(obj: M, key: str, existing: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    @@ -140,17 +140,17 @@ def _merge_field_value(
             return _listify(self_value) + _listify(other_value)
         elif merge_behavior == MergeBehavior.Update:
             if not isinstance(self_value, dict):
    -            raise InternalException(f"expected dict, got {self_value}")
    +            raise DbtInternalError(f"expected dict, got {self_value}")
             if not isinstance(other_value, dict):
    -            raise InternalException(f"expected dict, got {other_value}")
    +            raise DbtInternalError(f"expected dict, got {other_value}")
             value = self_value.copy()
             value.update(other_value)
             return value
         elif merge_behavior == MergeBehavior.DictKeyAppend:
             if not isinstance(self_value, dict):
    -            raise InternalException(f"expected dict, got {self_value}")
    +            raise DbtInternalError(f"expected dict, got {self_value}")
             if not isinstance(other_value, dict):
    -            raise InternalException(f"expected dict, got {other_value}")
    +            raise DbtInternalError(f"expected dict, got {other_value}")
             new_dict = {}
             for key in self_value.keys():
                 new_dict[key] = _listify(self_value[key])
    @@ -172,7 +172,7 @@ def _merge_field_value(
             return new_dict
     
         else:
    -        raise InternalException(f"Got an invalid merge_behavior: {merge_behavior}")
    +        raise DbtInternalError(f"Got an invalid merge_behavior: {merge_behavior}")
     
     
     def insensitive_patterns(*patterns: str):
    @@ -227,7 +227,7 @@ def __delitem__(self, key):
                 msg = (
                     'Error, tried to delete config key "{}": Cannot delete ' "built-in keys"
                 ).format(key)
    -            raise CompilationException(msg)
    +            raise CompilationError(msg)
             else:
                 del self._extra[key]
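
Note: the Update and DictKeyAppend branches above differ only in how two dict values combine: Update takes the other side's value on a key clash, while DictKeyAppend listifies both sides and concatenates per key. A worked example (the full implementation also has an "extend" variant, visible in the context_config.py hunk earlier in this patch):

    def listify(value):
        return value if isinstance(value, list) else [value]

    self_value = {"a": 1, "b": 2}
    other_value = {"b": 3, "c": 4}

    # MergeBehavior.Update: plain dict.update semantics.
    update_result = {**self_value, **other_value}
    # -> {'a': 1, 'b': 3, 'c': 4}

    # MergeBehavior.DictKeyAppend: listify and append per key.
    append_result = {k: listify(v) for k, v in self_value.items()}
    for k, v in other_value.items():
        append_result[k] = append_result.get(k, []) + listify(v)
    # -> {'a': [1], 'b': [2, 3], 'c': [4]}
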
     
    diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py
    index 033318a34c1..a299f5e9b12 100644
    --- a/core/dbt/contracts/graph/nodes.py
    +++ b/core/dbt/contracts/graph/nodes.py
    @@ -46,6 +46,7 @@
     from dbt.events.contextvars import set_contextvars
     from dbt import flags
     from dbt.node_types import ModelLanguage, NodeType
    +from dbt.utils import cast_dict_to_dict_of_strings
     
     
     from .model_config import (
    @@ -206,6 +207,8 @@ class NodeInfoMixin:
     
         @property
         def node_info(self):
    +        meta = getattr(self, "meta", {})
    +        meta_stringified = cast_dict_to_dict_of_strings(meta)
             node_info = {
                 "node_path": getattr(self, "path", None),
                 "node_name": getattr(self, "name", None),
    @@ -215,6 +218,7 @@ def node_info(self):
                 "node_status": str(self._event_status.get("node_status")),
                 "node_started_at": self._event_status.get("started_at"),
                 "node_finished_at": self._event_status.get("finished_at"),
    +            "meta": meta_stringified,
             }
             node_info_msg = NodeInfo(**node_info)
             return node_info_msg
    @@ -976,12 +980,12 @@ class Metric(GraphNode):
         description: str
         label: str
         calculation_method: str
    -    timestamp: str
         expression: str
         filters: List[MetricFilter]
         time_grains: List[str]
         dimensions: List[str]
         resource_type: NodeType = field(metadata={"restrict": [NodeType.Metric]})
    +    timestamp: Optional[str] = None
         window: Optional[MetricTime] = None
         model: Optional[str] = None
         model_unique_id: Optional[str] = None
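
Note: node_info now carries a stringified copy of the node's meta dict. cast_dict_to_dict_of_strings is imported from dbt.utils but not defined in this patch; given its call sites here and in results.py, a minimal sketch would be:

    # Hypothetical sketch: coerce keys and values to str so the dict can
    # populate a proto map<string, string> field.
    def cast_dict_to_dict_of_strings_sketch(dct: dict) -> dict:
        return {str(k): str(v) for k, v in dct.items()}
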
    diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py
    index 453dc883d7b..6521e644542 100644
    --- a/core/dbt/contracts/graph/unparsed.py
    +++ b/core/dbt/contracts/graph/unparsed.py
    @@ -11,7 +11,7 @@
     
     # trigger the PathEncoder
     import dbt.helper_types  # noqa:F401
    -from dbt.exceptions import CompilationException, ParsingException
    +from dbt.exceptions import CompilationError, ParsingError
     
     from dbt.dataclass_schema import dbtClassMixin, StrEnum, ExtensibleDbtClassMixin, ValidationError
     
    @@ -222,7 +222,7 @@ class ExternalPartition(AdditionalPropertiesAllowed, Replaceable):
     
         def __post_init__(self):
             if self.name == "" or self.data_type == "":
    -            raise CompilationException("External partition columns must have names and data types")
    +            raise CompilationError("External partition columns must have names and data types")
     
     
     @dataclass
    @@ -484,9 +484,9 @@ class UnparsedMetric(dbtClassMixin, Replaceable):
         name: str
         label: str
         calculation_method: str
    -    timestamp: str
         expression: str
         description: str = ""
    +    timestamp: Optional[str] = None
         time_grains: List[str] = field(default_factory=list)
         dimensions: List[str] = field(default_factory=list)
         window: Optional[MetricTime] = None
    @@ -514,10 +514,20 @@ def validate(cls, data):
                     errors.append("must contain only letters, numbers and underscores")
     
                 if errors:
    -                raise ParsingException(
    +                raise ParsingError(
                         f"The metric name '{data['name']}' is invalid.  It {', '.join(e for e in errors)}"
                     )
     
    +        if data.get("timestamp") is None and data.get("time_grains") is not None:
    +            raise ValidationError(
+                f"The metric '{data['name']}' has time_grains defined but is missing a timestamp dimension."
    +            )
    +
    +        if data.get("timestamp") is None and data.get("window") is not None:
    +            raise ValidationError(
+                f"The metric '{data['name']}' has a window defined but is missing a timestamp dimension."
    +            )
    +
             if data.get("model") is None and data.get("calculation_method") != "derived":
                 raise ValidationError("Non-derived metrics require a 'model' property")
     
    diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py
    index 2fd7434bd87..ba15b9d32b6 100644
    --- a/core/dbt/contracts/project.py
    +++ b/core/dbt/contracts/project.py
    @@ -249,6 +249,7 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
         printer_width: Optional[int] = None
         write_json: Optional[bool] = None
         warn_error: Optional[bool] = None
    +    warn_error_options: Optional[Dict[str, Union[str, List[str]]]] = None
         log_format: Optional[str] = None
         debug: Optional[bool] = None
         version_check: Optional[bool] = None
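
Note: UserConfig gains warn_error_options, typed as a mapping from strings to a string or list of strings. Its consumers are not in this patch; the annotation suggests include/exclude lists of warning event names, along the lines of the following (keys and values are assumptions, not confirmed here):

    # Hypothetical shape implied by the type annotation only.
    warn_error_options = {
        "include": "all",
        "exclude": ["SomeWarningEventName"],
    }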
    diff --git a/core/dbt/contracts/relation.py b/core/dbt/contracts/relation.py
    index e8cba2ad155..e557c358966 100644
    --- a/core/dbt/contracts/relation.py
    +++ b/core/dbt/contracts/relation.py
    @@ -9,7 +9,7 @@
     from dbt.dataclass_schema import dbtClassMixin, StrEnum
     
     from dbt.contracts.util import Replaceable
    -from dbt.exceptions import CompilationException, DataclassNotDict
    +from dbt.exceptions import CompilationError, DataclassNotDictError
     from dbt.utils import deep_merge
     
     
    @@ -43,10 +43,10 @@ def __getitem__(self, key):
                 raise KeyError(key) from None
     
         def __iter__(self):
    -        raise DataclassNotDict(self)
    +        raise DataclassNotDictError(self)
     
         def __len__(self):
    -        raise DataclassNotDict(self)
    +        raise DataclassNotDictError(self)
     
         def incorporate(self, **kwargs):
             value = self.to_dict(omit_none=True)
    @@ -88,13 +88,11 @@ class Path(FakeAPIObject):
         def __post_init__(self):
         # handle pesky jinja2.Undefined sneaking in here and messing up rendering
             if not isinstance(self.database, (type(None), str)):
    -            raise CompilationException("Got an invalid path database: {}".format(self.database))
    +            raise CompilationError("Got an invalid path database: {}".format(self.database))
             if not isinstance(self.schema, (type(None), str)):
    -            raise CompilationException("Got an invalid path schema: {}".format(self.schema))
    +            raise CompilationError("Got an invalid path schema: {}".format(self.schema))
             if not isinstance(self.identifier, (type(None), str)):
    -            raise CompilationException(
    -                "Got an invalid path identifier: {}".format(self.identifier)
    -            )
    +            raise CompilationError("Got an invalid path identifier: {}".format(self.identifier))
     
         def get_lowered_part(self, key: ComponentName) -> Optional[str]:
             part = self.get_part(key)
    diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py
    index 97c43396e33..4378d207ac2 100644
    --- a/core/dbt/contracts/results.py
    +++ b/core/dbt/contracts/results.py
    @@ -7,13 +7,13 @@
         Replaceable,
         schema_version,
     )
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.events.functions import fire_event
     from dbt.events.types import TimingInfoCollected
     from dbt.events.proto_types import RunResultMsg, TimingInfoMsg
     from dbt.events.contextvars import get_node_info
     from dbt.logger import TimingProcessor
    -from dbt.utils import lowercase, cast_to_str, cast_to_int
    +from dbt.utils import lowercase, cast_to_str, cast_to_int, cast_dict_to_dict_of_strings
     from dbt.dataclass_schema import dbtClassMixin, StrEnum
     
     import agate
    @@ -130,7 +130,6 @@ def __pre_deserialize__(cls, data):
             return data
     
         def to_msg(self):
    -        # TODO: add more fields
             msg = RunResultMsg()
             msg.status = str(self.status)
             msg.message = cast_to_str(self.message)
    @@ -138,7 +137,7 @@ def to_msg(self):
             msg.execution_time = self.execution_time
             msg.num_failures = cast_to_int(self.failures)
             msg.timing_info = [ti.to_msg() for ti in self.timing]
    -        # adapter_response
    +        msg.adapter_response = cast_dict_to_dict_of_strings(self.adapter_response)
             return msg
     
     
    @@ -343,14 +342,14 @@ def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput
     
         # we know that this must be a SourceFreshnessResult
         if not isinstance(result, SourceFreshnessResult):
    -        raise InternalException(
    +        raise DbtInternalError(
                 "Got {} instead of a SourceFreshnessResult for a "
                 "non-error result in freshness execution!".format(type(result))
             )
         # if we're here, we must have a non-None freshness threshold
         criteria = result.node.freshness
         if criteria is None:
    -        raise InternalException(
    +        raise DbtInternalError(
                 "Somehow evaluated a freshness result for a source that has no freshness criteria!"
             )
         return SourceFreshnessOutput(
    diff --git a/core/dbt/contracts/state.py b/core/dbt/contracts/state.py
    index 9940a0cb93d..cb135e241ac 100644
    --- a/core/dbt/contracts/state.py
    +++ b/core/dbt/contracts/state.py
    @@ -3,7 +3,7 @@
     from .results import RunResultsArtifact
     from .results import FreshnessExecutionResultArtifact
     from typing import Optional
    -from dbt.exceptions import IncompatibleSchemaException
    +from dbt.exceptions import IncompatibleSchemaError
     
     
     class PreviousState:
    @@ -19,7 +19,7 @@ def __init__(self, path: Path, current_path: Path):
             if manifest_path.exists() and manifest_path.is_file():
                 try:
                     self.manifest = WritableManifest.read_and_check_versions(str(manifest_path))
    -            except IncompatibleSchemaException as exc:
    +            except IncompatibleSchemaError as exc:
                     exc.add_filename(str(manifest_path))
                     raise
     
    @@ -27,7 +27,7 @@ def __init__(self, path: Path, current_path: Path):
             if results_path.exists() and results_path.is_file():
                 try:
                     self.results = RunResultsArtifact.read_and_check_versions(str(results_path))
    -            except IncompatibleSchemaException as exc:
    +            except IncompatibleSchemaError as exc:
                     exc.add_filename(str(results_path))
                     raise
     
    @@ -37,7 +37,7 @@ def __init__(self, path: Path, current_path: Path):
                     self.sources = FreshnessExecutionResultArtifact.read_and_check_versions(
                         str(sources_path)
                     )
    -            except IncompatibleSchemaException as exc:
    +            except IncompatibleSchemaError as exc:
                     exc.add_filename(str(sources_path))
                     raise
     
    @@ -47,6 +47,6 @@ def __init__(self, path: Path, current_path: Path):
                     self.sources_current = FreshnessExecutionResultArtifact.read_and_check_versions(
                         str(sources_current_path)
                     )
    -            except IncompatibleSchemaException as exc:
    +            except IncompatibleSchemaError as exc:
                     exc.add_filename(str(sources_current_path))
                     raise
    diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py
    index 99f7a35c66d..d8b166b1d93 100644
    --- a/core/dbt/contracts/util.py
    +++ b/core/dbt/contracts/util.py
    @@ -5,9 +5,9 @@
     from dbt.clients.system import write_json, read_json
     from dbt import deprecations
     from dbt.exceptions import (
    -    InternalException,
    -    RuntimeException,
    -    IncompatibleSchemaException,
    +    DbtInternalError,
    +    DbtRuntimeError,
    +    IncompatibleSchemaError,
     )
     from dbt.version import __version__
     from dbt.events.functions import get_invocation_id, get_metadata_vars
    @@ -123,7 +123,7 @@ def read(cls, path: str):
             try:
                 data = read_json(path)
             except (EnvironmentError, ValueError) as exc:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f'Could not read {cls.__name__} at "{path}" as JSON: {exc}'
                 ) from exc
     
    @@ -283,7 +283,7 @@ def upgrade_manifest_json(manifest: dict) -> dict:
             if "root_path" in exposure_content:
                 del exposure_content["root_path"]
         for source_content in manifest.get("sources", {}).values():
    -        if "root_path" in exposure_content:
    +        if "root_path" in source_content:
                 del source_content["root_path"]
         for macro_content in manifest.get("macros", {}).values():
             if "root_path" in macro_content:
    @@ -320,7 +320,7 @@ def read_and_check_versions(cls, path: str):
             try:
                 data = read_json(path)
             except (EnvironmentError, ValueError) as exc:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f'Could not read {cls.__name__} at "{path}" as JSON: {exc}'
                 ) from exc
     
    @@ -332,7 +332,7 @@ def read_and_check_versions(cls, path: str):
                     previous_schema_version = data["metadata"]["dbt_schema_version"]
                     # cls.dbt_schema_version is a SchemaVersion object
                     if not cls.is_compatible_version(previous_schema_version):
    -                    raise IncompatibleSchemaException(
    +                    raise IncompatibleSchemaError(
                             expected=str(cls.dbt_schema_version),
                             found=previous_schema_version,
                         )
    @@ -357,7 +357,7 @@ class ArtifactMixin(VersionedSchema, Writable, Readable):
         def validate(cls, data):
             super().validate(data)
             if cls.dbt_schema_version is None:
    -            raise InternalException("Cannot call from_dict with no schema version!")
    +            raise DbtInternalError("Cannot call from_dict with no schema version!")
     
     
     class Identifier(ValidatedStringMixin):
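
Note: the upgrade_manifest_json change in this file is a real bug fix rather than a rename: the sources loop tested the exposures loop's leftover variable, so a source's root_path was only deleted when the final exposure happened to carry one. Distilled:

    def scrub_root_paths(manifest: dict) -> None:
        for exposure_content in manifest.get("exposures", {}).values():
            if "root_path" in exposure_content:
                del exposure_content["root_path"]
        for source_content in manifest.get("sources", {}).values():
            # Before this patch the test read "exposure_content" here,
            # a stale variable from the loop above.
            if "root_path" in source_content:
                del source_content["root_path"]
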
    diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py
    index 683ce2c4dc7..a46ab91e7d2 100644
    --- a/core/dbt/deps/git.py
    +++ b/core/dbt/deps/git.py
    @@ -10,7 +10,7 @@
         GitPackage,
     )
     from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path
    -from dbt.exceptions import ExecutableError, MultipleVersionGitDeps
    +from dbt.exceptions import ExecutableError, MultipleVersionGitDepsError
     from dbt.events.functions import fire_event, warn_or_error
     from dbt.events.types import EnsureGitInstalled, DepsUnpinned
     
    @@ -146,7 +146,7 @@ def resolved(self) -> GitPinnedPackage:
             if len(requested) == 0:
                 requested = {"HEAD"}
             elif len(requested) > 1:
    -            raise MultipleVersionGitDeps(self.git, requested)
    +            raise MultipleVersionGitDepsError(self.git, requested)
     
             return GitPinnedPackage(
                 git=self.git,
    diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py
    index f3398f4b16f..e1f39a7551d 100644
    --- a/core/dbt/deps/registry.py
    +++ b/core/dbt/deps/registry.py
    @@ -10,10 +10,10 @@
     )
     from dbt.deps.base import PinnedPackage, UnpinnedPackage
     from dbt.exceptions import (
    -    DependencyException,
    -    PackageNotFound,
    -    PackageVersionNotFound,
    -    VersionsNotCompatibleException,
    +    DependencyError,
    +    PackageNotFoundError,
    +    PackageVersionNotFoundError,
    +    VersionsNotCompatibleError,
     )
     
     
    @@ -71,7 +71,7 @@ def __init__(
         def _check_in_index(self):
             index = registry.index_cached()
             if self.package not in index:
    -            raise PackageNotFound(self.package)
    +            raise PackageNotFoundError(self.package)
     
         @classmethod
         def from_contract(cls, contract: RegistryPackage) -> "RegistryUnpinnedPackage":
    @@ -95,9 +95,9 @@ def resolved(self) -> RegistryPinnedPackage:
             self._check_in_index()
             try:
                 range_ = semver.reduce_versions(*self.versions)
    -        except VersionsNotCompatibleException as e:
    +        except VersionsNotCompatibleError as e:
                 new_msg = "Version error for package {}: {}".format(self.name, e)
    -            raise DependencyException(new_msg) from e
    +            raise DependencyError(new_msg) from e
     
             should_version_check = bool(flags.VERSION_CHECK)
             dbt_version = get_installed_version()
    @@ -118,7 +118,9 @@ def resolved(self) -> RegistryPinnedPackage:
                 target = None
             if not target:
                 # raise an exception if no installable target version is found
    -            raise PackageVersionNotFound(self.package, range_, installable, should_version_check)
    +            raise PackageVersionNotFoundError(
    +                self.package, range_, installable, should_version_check
    +            )
             latest_compatible = installable[-1]
             return RegistryPinnedPackage(
                 package=self.package, version=target, version_latest=latest_compatible
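
The resolved() hunk above wraps the low-level semver failure into a user-facing error with "raise ... from", preserving the original traceback. A minimal sketch of that chaining pattern, with stand-in exception classes rather than the real dbt ones:

    class VersionsNotCompatibleError(Exception):
        pass

    class DependencyError(Exception):
        pass

    def reduce_or_wrap(name: str, reduce_fn, *versions):
        # Translate the semver-level error into a package-level one; "from e"
        # keeps the original exception attached as __cause__ in the traceback.
        try:
            return reduce_fn(*versions)
        except VersionsNotCompatibleError as e:
            raise DependencyError(f"Version error for package {name}: {e}") from e
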
    diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py
    index b83a3bdee7d..52758f6bb5c 100644
    --- a/core/dbt/deps/resolver.py
    +++ b/core/dbt/deps/resolver.py
    @@ -2,10 +2,10 @@
     from typing import Dict, List, NoReturn, Union, Type, Iterator, Set, Any
     
     from dbt.exceptions import (
    -    DuplicateDependencyToRoot,
    -    DuplicateProjectDependency,
    -    MismatchedDependencyTypes,
    -    InternalException,
    +    DuplicateDependencyToRootError,
    +    DuplicateProjectDependencyError,
    +    MismatchedDependencyTypeError,
    +    DbtInternalError,
     )
     
     from dbt.config import Project
    @@ -56,7 +56,7 @@ def __setitem__(self, key: BasePackage, value):
             self.packages[key_str] = value
     
         def _mismatched_types(self, old: UnpinnedPackage, new: UnpinnedPackage) -> NoReturn:
    -        raise MismatchedDependencyTypes(new, old)
    +        raise MismatchedDependencyTypeError(new, old)
     
         def incorporate(self, package: UnpinnedPackage):
             key: str = self._pick_key(package)
    @@ -80,7 +80,7 @@ def update_from(self, src: List[PackageContract]) -> None:
                 elif isinstance(contract, RegistryPackage):
                     pkg = RegistryUnpinnedPackage.from_contract(contract)
                 else:
    -                raise InternalException("Invalid package type {}".format(type(contract)))
    +                raise DbtInternalError("Invalid package type {}".format(type(contract)))
                 self.incorporate(pkg)
     
         @classmethod
    @@ -107,9 +107,9 @@ def _check_for_duplicate_project_names(
         for package in final_deps:
             project_name = package.get_project_name(project, renderer)
             if project_name in seen:
    -            raise DuplicateProjectDependency(project_name)
    +            raise DuplicateProjectDependencyError(project_name)
             elif project_name == project.project_name:
    -            raise DuplicateDependencyToRoot(project_name)
    +            raise DuplicateDependencyToRootError(project_name)
             seen.add(project_name)
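
The duplicate check above is a straightforward seen-set scan. A hypothetical distillation (function name and stand-in exceptions are illustrative only):

    from typing import Iterable, Set

    class DuplicateProjectDependencyError(Exception):
        pass

    class DuplicateDependencyToRootError(Exception):
        pass

    def check_duplicate_names(root_name: str, dep_names: Iterable[str]) -> None:
        seen: Set[str] = set()
        for name in dep_names:
            if name in seen:
                # Two packages resolve to the same project name.
                raise DuplicateProjectDependencyError(name)
            if name == root_name:
                # A dependency shadows the root project itself.
                raise DuplicateDependencyToRootError(name)
            seen.add(name)
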
     
     
    diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js
    index 36036732601..1fd56412ddf 100644
    --- a/core/dbt/docs/build/html/searchindex.js
    +++ b/core/dbt/docs/build/html/searchindex.js
    @@ -1 +1 @@
    -Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"right": 0, "now": 0, "best": 0, "wai": 0, "from": 0, "i": 0, "us": 0, "dbtrunner": 0, "we": 0, "expos": 0, "cli": 0, "main": 0, "import": 0, "cli_arg": 0, "project": 0, "dir": 0, "jaffle_shop": 0, "initi": 0, "runner": 0, "re": 0, "success": 0, "you": 0, "can": 0, "also": 0, "pass": 0, "pre": 0, "construct": 0, "object": 0, "those": 0, "instead": 0, "load": 0, "up": 0, "disk": 0, "preload": 0, "load_profil": 0, "postgr": 0, "load_project": 0, "fals": 0, "thi": 0, "For": 0, "full": 0, "exampl": 0, "code": 0, "refer": 0, "py": 0, "type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "unknown": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "have": 0, "been": 0, "explicitli": 0, "string": 0, "which": 0, "overrid": 0, "dbt_project": 0, "yml": 0, "path": 0, "directori": 0, "look": 0, "file": 0, "current": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "includ": 0, "The": 0, "name": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "configur": 0, "onli": 0, "appli": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "how": 0, "invok": 0, "command": 0, "python": 0, "runtim": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "resource_typ": 0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "model": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "project_nam": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "macro": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, 
"build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "resource_types": [[0, "build|resource_types"], [0, "list|resource_types"], [0, "list|resource_types"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "models": [[0, "compile|models"], [0, "list|models"], [0, "list|models"], [0, "run|models"], [0, 
"seed|models"], [0, "snapshot|models"], [0, "test|models"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "project_name": [[0, "init|project_name"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"], [0, "dbt-section"]], "output": [[0, "list|output"], [0, "list|output"]], "output_keys": [[0, "list|output_keys"], [0, "list|output_keys"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "macro": [[0, "run-operation|macro"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}})
    \ No newline at end of file
    +Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"right": 0, "now": 0, "best": 0, "wai": 0, "from": 0, "i": 0, "us": 0, "dbtrunner": 0, "we": 0, "expos": 0, "cli": 0, "main": 0, "import": 0, "cli_arg": 0, "project": 0, "dir": 0, "jaffle_shop": 0, "initi": 0, "runner": 0, "re": 0, "success": 0, "you": 0, "can": 0, "also": 0, "pass": 0, "pre": 0, "construct": 0, "object": 0, "those": 0, "instead": 0, "load": 0, "up": 0, "disk": 0, "preload": 0, "load_profil": 0, "postgr": 0, "load_project": 0, "fals": 0, "thi": 0, "For": 0, "full": 0, "exampl": 0, "code": 0, "refer": 0, "py": 0, "type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "unknown": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "have": 0, "been": 0, "explicitli": 0, "string": 0, "which": 0, "overrid": 0, "dbt_project": 0, "yml": 0, "path": 0, "directori": 0, "look": 0, "file": 0, "current": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "includ": 0, "The": 0, "name": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "configur": 0, "onli": 0, "appli": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "how": 0, "invok": 0, "command": 0, "python": 0, "runtim": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "resource_typ": 0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "model": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "project_nam": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "macro": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, 
"build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "resource_types": [[0, "build|resource_types"], [0, "list|resource_types"], [0, "list|resource_types"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "models": [[0, "compile|models"], [0, "list|models"], [0, "list|models"], [0, "run|models"], [0, 
"seed|models"], [0, "snapshot|models"], [0, "test|models"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "project_name": [[0, "init|project_name"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"], [0, "dbt-section"]], "output": [[0, "list|output"], [0, "list|output"]], "output_keys": [[0, "list|output_keys"], [0, "list|output_keys"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "macro": [[0, "run-operation|macro"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}})
    diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py
    index db74016099a..fbd35b58fa1 100644
    --- a/core/dbt/events/base_types.py
    +++ b/core/dbt/events/base_types.py
    @@ -3,6 +3,13 @@
     import os
     import threading
     from datetime import datetime
    +import dbt.events.proto_types as pt
    +import sys
    +
    +if sys.version_info >= (3, 8):
    +    from typing import Protocol
    +else:
    +    from typing_extensions import Protocol
     
     # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
     # These base types define the _required structure_ for the concrete event #
    @@ -58,25 +65,20 @@ class EventLevel(str, Enum):
     class BaseEvent:
         """BaseEvent for proto message generated python events"""
     
    -    def __post_init__(self):
    -        super().__post_init__()
    -        if not self.info.level:
    -            self.info.level = self.level_tag()
    -        assert self.info.level in ["info", "warn", "error", "debug", "test"]
    -        if not hasattr(self.info, "msg") or not self.info.msg:
    -            self.info.msg = self.message()
    -        self.info.invocation_id = get_invocation_id()
    -        self.info.extra = get_global_metadata_vars()
    -        self.info.ts = datetime.utcnow()
    -        self.info.pid = get_pid()
    -        self.info.thread = get_thread_name()
    -        self.info.code = self.code()
    -        self.info.name = type(self).__name__
    -
    -    # This is here because although we know that info should always
    -    # exist, mypy doesn't.
    -    def log_level(self) -> EventLevel:
    -        return self.info.level  # type: ignore
     
         def level_tag(self) -> EventLevel:
             return EventLevel.DEBUG
    @@ -84,6 +86,37 @@ def level_tag(self) -> EventLevel:
         def message(self) -> str:
             raise Exception("message() not implemented for event")
     
    +    def code(self) -> str:
    +        raise Exception("code() not implemented for event")
    +
    +
    +class EventMsg(Protocol):
    +    info: pt.EventInfo
    +    data: BaseEvent
    +
    +
    +def msg_from_base_event(event: BaseEvent, level: EventLevel = None):
    +    msg_class_name = f"{type(event).__name__}Msg"
    +    msg_cls = getattr(pt, msg_class_name)
    +
    +    # level in EventInfo must be a string, not an EventLevel
    +    msg_level: str = level.value if level else event.level_tag().value
    +    assert msg_level is not None
    +    event_info = pt.EventInfo(
    +        level=msg_level,
    +        msg=event.message(),
    +        invocation_id=get_invocation_id(),
    +        extra=get_global_metadata_vars(),
    +        ts=datetime.utcnow(),
    +        pid=get_pid(),
    +        thread=get_thread_name(),
    +        code=event.code(),
    +        name=type(event).__name__,
    +    )
    +    new_event = msg_cls(data=event, info=event_info)
    +    return new_event
    +
     
      # DynamicLevel requires that the level be supplied on the
     -# event construction call using the "info" function from functions.py
     +# fire_event call, via its "level" argument (this patch removes the old
     +# "info" helper from functions.py)
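
A rough sketch of how the new wrapper flow is intended to be used. MainReportVersion and EventLevel.INFO are assumed from elsewhere in this patch (types.py / proto_types.py), and the constructor arguments are illustrative:

    from dbt.events.base_types import EventLevel, msg_from_base_event
    from dbt.events.types import MainReportVersion  # assumed concrete event

    event = MainReportVersion(version="1.4.0a1", log_version=2)
    msg = msg_from_base_event(event, level=EventLevel.INFO)

    # The generated *Msg class pairs the event payload ("data") with
    # per-invocation metadata ("info"): level, ts, pid, invocation_id, etc.
    assert msg.data is event
    print(msg.info.name, msg.info.level, msg.info.code)
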
    diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py
    index 97a7d5d4360..10bf225bef7 100644
    --- a/core/dbt/events/eventmgr.py
    +++ b/core/dbt/events/eventmgr.py
    @@ -9,16 +9,16 @@
     from typing import Any, Callable, List, Optional, TextIO
     from uuid import uuid4
     
    -from dbt.events.base_types import BaseEvent, EventLevel
    +from dbt.events.base_types import BaseEvent, EventLevel, msg_from_base_event, EventMsg
     
     
     -# A Filter is a function which takes a BaseEvent and returns True if the event
     -# should be logged, False otherwise.
     -Filter = Callable[[BaseEvent], bool]
     +# A Filter is a function which takes an EventMsg and returns True if the
     +# message should be logged, False otherwise.
     +Filter = Callable[[EventMsg], bool]
     
     
     # Default filter which logs every event
    -def NoFilter(_: BaseEvent) -> bool:
    +def NoFilter(_: EventMsg) -> bool:
         return True
     
     
    @@ -47,13 +47,6 @@ class LineFormat(Enum):
     }
     
     
    -# We should consider fixing the problem, but log_level() can return a string for
    -# DynamicLevel events, even thought it is supposed to return an EventLevel. This
    -# function gets a string for the level, no matter what.
    -def _get_level_str(e: BaseEvent) -> str:
    -    return e.log_level().value if isinstance(e.log_level(), EventLevel) else str(e.log_level())
    -
    -
     # We need this function for now because the numeric log severity levels in
     # Python do not match those for logbook, so we have to explicitly call the
     # correct function by name.
    @@ -113,14 +106,14 @@ def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None:
     
                 self._python_logger = log
     
    -    def create_line(self, e: BaseEvent) -> str:
    +    def create_line(self, msg: EventMsg) -> str:
             raise NotImplementedError()
     
    -    def write_line(self, e: BaseEvent):
    -        line = self.create_line(e)
    -        python_level = _log_level_map[e.log_level()]
    +    def write_line(self, msg: EventMsg):
    +        line = self.create_line(msg)
    +        python_level = _log_level_map[EventLevel(msg.info.level)]
             if self._python_logger is not None:
    -            send_to_logger(self._python_logger, _get_level_str(e), line)
    +            send_to_logger(self._python_logger, msg.info.level, line)
             elif self._stream is not None and _log_level_map[self.level] <= python_level:
                 self._stream.write(line + "\n")
     
    @@ -138,24 +131,26 @@ def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None:
             self.use_colors = config.use_colors
             self.use_debug_format = config.line_format == LineFormat.DebugText
     
    -    def create_line(self, e: BaseEvent) -> str:
    -        return self.create_debug_line(e) if self.use_debug_format else self.create_info_line(e)
    +    def create_line(self, msg: EventMsg) -> str:
    +        return self.create_debug_line(msg) if self.use_debug_format else self.create_info_line(msg)
     
    -    def create_info_line(self, e: BaseEvent) -> str:
    +    def create_info_line(self, msg: EventMsg) -> str:
             ts: str = datetime.utcnow().strftime("%H:%M:%S")
    -        scrubbed_msg: str = self.scrubber(e.message())  # type: ignore
    +        scrubbed_msg: str = self.scrubber(msg.info.msg)  # type: ignore
             return f"{self._get_color_tag()}{ts}  {scrubbed_msg}"
     
    -    def create_debug_line(self, e: BaseEvent) -> str:
    +    def create_debug_line(self, msg: EventMsg) -> str:
             log_line: str = ""
             # Create a separator if this is the beginning of an invocation
             # TODO: This is an ugly hack, get rid of it if we can
    -        if type(e).__name__ == "MainReportVersion":
    +        if msg.info.name == "MainReportVersion":
                 separator = 30 * "="
    -            log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n"
    -        ts: str = datetime.utcnow().strftime("%H:%M:%S.%f")
    -        scrubbed_msg: str = self.scrubber(e.message())  # type: ignore
    -        level = _get_level_str(e)
    +            log_line = (
    +                f"\n\n{separator} {msg.info.ts} | {self.event_manager.invocation_id} {separator}\n"
    +            )
    +        ts: str = msg.info.ts.strftime("%H:%M:%S.%f")
    +        scrubbed_msg: str = self.scrubber(msg.info.msg)  # type: ignore
    +        level = msg.info.level
             log_line += (
                 f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}"
             )
    @@ -175,11 +170,11 @@ def _get_thread_name(self) -> str:
     
     
     class _JsonLogger(_Logger):
    -    def create_line(self, e: BaseEvent) -> str:
    -        from dbt.events.functions import event_to_dict
    +    def create_line(self, msg: EventMsg) -> str:
    +        from dbt.events.functions import msg_to_dict
     
    -        event_dict = event_to_dict(e)
    -        raw_log_line = json.dumps(event_dict, sort_keys=True)
    +        msg_dict = msg_to_dict(msg)
    +        raw_log_line = json.dumps(msg_dict, sort_keys=True)
             line = self.scrubber(raw_log_line)  # type: ignore
             return line
     
    @@ -187,16 +182,17 @@ def create_line(self, e: BaseEvent) -> str:
     class EventManager:
         def __init__(self) -> None:
             self.loggers: List[_Logger] = []
    -        self.callbacks: List[Callable[[BaseEvent], None]] = []
    +        self.callbacks: List[Callable[[EventMsg], None]] = []
             self.invocation_id: str = str(uuid4())
     
    -    def fire_event(self, e: BaseEvent) -> None:
    +    def fire_event(self, e: BaseEvent, level: EventLevel = None) -> None:
    +        msg = msg_from_base_event(e, level=level)
             for logger in self.loggers:
    -            if logger.filter(e):  # type: ignore
    -                logger.write_line(e)
    +            if logger.filter(msg):  # type: ignore
    +                logger.write_line(msg)
     
             for callback in self.callbacks:
    -            callback(e)
    +            callback(msg)
     
         def add_logger(self, config: LoggerConfig):
             logger = (
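
With fire_event now wrapping each BaseEvent into an EventMsg before dispatch, loggers and callbacks observe the same msg object. A small sketch of registering a callback on the manager (the capture list is hypothetical; EVENT_MANAGER lives in dbt.events.functions per this patch):

    from dbt.events.base_types import EventMsg
    from dbt.events.functions import EVENT_MANAGER

    captured = []

    def capture(msg: EventMsg) -> None:
        # Callbacks receive the wrapped msg, not the bare event.
        captured.append((msg.info.name, msg.info.msg))

    EVENT_MANAGER.callbacks.append(capture)
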
    diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py
    index ff5b267bc5e..00407b538bd 100644
    --- a/core/dbt/events/functions.py
    +++ b/core/dbt/events/functions.py
    @@ -1,10 +1,9 @@
     import betterproto
     from dbt.constants import METADATA_ENV_PREFIX
    -from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut
    +from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut, EventMsg
     from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter
     from dbt.events.helpers import env_secrets, scrub_secrets
    -from dbt.events.proto_types import EventInfo
    -from dbt.events.types import EmptyLine
    +from dbt.events.types import Formatting
     import dbt.flags as flags
     from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing
     from functools import partial
    @@ -64,14 +63,14 @@ def _get_stdout_config(log_format: str, debug: bool, use_colors: bool) -> Logger
     
     
     def _stdout_filter(
    -    log_cache_events: bool, debug_mode: bool, quiet_mode: bool, log_format: str, evt: BaseEvent
    +    log_cache_events: bool, debug_mode: bool, quiet_mode: bool, log_format: str, msg: EventMsg
     ) -> bool:
         return (
    -        not isinstance(evt, NoStdOut)
    -        and (not isinstance(evt, Cache) or log_cache_events)
    -        and (evt.log_level() != EventLevel.DEBUG or debug_mode)
    -        and (evt.log_level() == EventLevel.ERROR or not quiet_mode)
    -        and not (log_format == "json" and type(evt) == EmptyLine)
    +        not isinstance(msg.data, NoStdOut)
    +        and (not isinstance(msg.data, Cache) or log_cache_events)
    +        and (EventLevel(msg.info.level) != EventLevel.DEBUG or debug_mode)
    +        and (EventLevel(msg.info.level) == EventLevel.ERROR or not quiet_mode)
    +        and not (log_format == "json" and type(msg.data) == Formatting)
         )
     
     
    @@ -87,11 +86,11 @@ def _get_logfile_config(log_path: str, use_colors: bool, log_format: str) -> Log
         )
     
     
    -def _logfile_filter(log_cache_events: bool, log_format: str, evt: BaseEvent) -> bool:
    +def _logfile_filter(log_cache_events: bool, log_format: str, msg: EventMsg) -> bool:
         return (
    -        not isinstance(evt, NoFile)
    -        and not (isinstance(evt, Cache) and not log_cache_events)
    -        and not (log_format == "json" and type(evt) == EmptyLine)
    +        not isinstance(msg.data, NoFile)
    +        and not (isinstance(msg.data, Cache) and not log_cache_events)
    +        and not (log_format == "json" and type(msg.data) == Formatting)
         )
     
     
    @@ -99,7 +98,7 @@ def _get_logbook_log_config(debug: bool) -> LoggerConfig:
         # use the default one since this code should be removed when we remove logbook
         config = _get_stdout_config("", debug, bool(flags.USE_COLORS))
         config.name = "logbook_log"
    -    config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache)
     +    config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda msg: not isinstance(msg.data, Cache)
         config.logger = GLOBAL_LOGGER
         return config
     
    @@ -145,48 +144,58 @@ def stop_capture_stdout_logs():
     
     -# returns a dictionary representation of the event fields.
     -# the message may contain secrets which must be scrubbed at the usage site.
     +# Returns a JSON string representation of the message fields.
     +# The message may contain secrets, which must be scrubbed at the usage site.
    -def event_to_json(event: BaseEvent) -> str:
    -    event_dict = event_to_dict(event)
    -    raw_log_line = json.dumps(event_dict, sort_keys=True)
    +def msg_to_json(msg: EventMsg) -> str:
    +    msg_dict = msg_to_dict(msg)
    +    raw_log_line = json.dumps(msg_dict, sort_keys=True)
         return raw_log_line
     
     
    -def event_to_dict(event: BaseEvent) -> dict:
    -    event_dict = dict()
    +def msg_to_dict(msg: EventMsg) -> dict:
    +    msg_dict = dict()
         try:
    -        event_dict = event.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True)  # type: ignore
    +        msg_dict = msg.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True)  # type: ignore
         except AttributeError as exc:
    -        event_type = type(event).__name__
    +        event_type = type(msg).__name__
             raise Exception(f"type {event_type} is not serializable. {str(exc)}")
         # We don't want an empty NodeInfo in output
    -    if "node_info" in event_dict and event_dict["node_info"]["node_name"] == "":
    -        del event_dict["node_info"]
    -    return event_dict
    +    if (
    +        "data" in msg_dict
    +        and "node_info" in msg_dict["data"]
    +        and msg_dict["data"]["node_info"]["node_name"] == ""
    +    ):
    +        del msg_dict["data"]["node_info"]
    +    return msg_dict
     
     
     def warn_or_error(event, node=None):
    -    if flags.WARN_ERROR:
    +    # TODO: resolve this circular import when flags.WARN_ERROR_OPTIONS is WarnErrorOptions type via click CLI.
    +    from dbt.helper_types import WarnErrorOptions
    +
    +    warn_error_options = WarnErrorOptions.from_yaml_string(flags.WARN_ERROR_OPTIONS)
    +    if flags.WARN_ERROR or warn_error_options.includes(type(event).__name__):
             # TODO: resolve this circular import when at top
    -        from dbt.exceptions import EventCompilationException
    +        from dbt.exceptions import EventCompilationError
     
    -        raise EventCompilationException(event.info.msg, node)
    +        raise EventCompilationError(event.message(), node)
         else:
             fire_event(event)
     
     
     # an alternative to fire_event which only creates and logs the event value
     # if the condition is met. Does nothing otherwise.
    -def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None:
    +def fire_event_if(
    +    conditional: bool, lazy_e: Callable[[], BaseEvent], level: EventLevel = None
    +) -> None:
         if conditional:
    -        fire_event(lazy_e())
    +        fire_event(lazy_e(), level=level)
     
     
     # top-level method for accessing the new eventing system
     # this is where all the side effects happen branched by event type
     # (i.e. - mutating the event history, printing to stdout, logging
     # to files, etc.)
    -def fire_event(e: BaseEvent) -> None:
    -    EVENT_MANAGER.fire_event(e)
    +def fire_event(e: BaseEvent, level: EventLevel = None) -> None:
    +    EVENT_MANAGER.fire_event(e, level=level)
     
     
     def get_metadata_vars() -> Dict[str, str]:
    @@ -213,11 +222,3 @@ def set_invocation_id() -> None:
         # This is primarily for setting the invocation_id for separate
         # commands in the dbt servers. It shouldn't be necessary for the CLI.
         EVENT_MANAGER.invocation_id = str(uuid.uuid4())
    -
    -
    -# Currently used to set the level in EventInfo, so logging events can
    -# provide more than one "level". Might be used in the future to set
    -# more fields in EventInfo, once some of that information is no longer global
    -def info(level="info"):
    -    info = EventInfo(level=level)
    -    return info
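
Because Filter is now Callable[[EventMsg], bool] and msg.info.level is a plain string, custom filters compare levels through the EventLevel enum, as the stdout filter above does. A sketch of an errors-only filter under these assumptions (EventLevel.ERROR is assumed to exist alongside the DEBUG member shown earlier):

    from dbt.events.base_types import EventLevel, EventMsg

    def errors_only(msg: EventMsg) -> bool:
        # msg.info.level is a str such as "error"; normalize via the enum.
        return EventLevel(msg.info.level) == EventLevel.ERROR

Such a function could be assigned to a LoggerConfig's filter, the same way _get_logbook_log_config swaps in its Cache-based lambda above.
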
    diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py
    index 37fe69453f6..5decf0713fc 100644
    --- a/core/dbt/events/proto_types.py
    +++ b/core/dbt/events/proto_types.py
    @@ -47,6 +47,9 @@ class NodeInfo(betterproto.Message):
         node_status: str = betterproto.string_field(6)
         node_started_at: str = betterproto.string_field(7)
         node_finished_at: str = betterproto.string_field(8)
    +    meta: Dict[str, str] = betterproto.map_field(
    +        9, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    +    )
     
     
     @dataclass
    @@ -91,2101 +94,2747 @@ class GenericMessage(betterproto.Message):
     class MainReportVersion(betterproto.Message):
         """A001"""
     
    +    version: str = betterproto.string_field(1)
    +    log_version: int = betterproto.int32_field(2)
    +
    +
    +@dataclass
    +class MainReportVersionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    version: str = betterproto.string_field(2)
    -    log_version: int = betterproto.int32_field(3)
    +    data: "MainReportVersion" = betterproto.message_field(2)
     
     
     @dataclass
     class MainReportArgs(betterproto.Message):
         """A002"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
         args: Dict[str, str] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    +        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
         )
     
     
    +@dataclass
    +class MainReportArgsMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "MainReportArgs" = betterproto.message_field(2)
    +
    +
     @dataclass
     class MainTrackingUserState(betterproto.Message):
         """A003"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    user_state: str = betterproto.string_field(2)
    +    user_state: str = betterproto.string_field(1)
     
     
     @dataclass
    -class MergedFromState(betterproto.Message):
    -    """A004"""
    -
    +class MainTrackingUserStateMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    num_merged: int = betterproto.int32_field(2)
    -    sample: List[str] = betterproto.string_field(3)
    +    data: "MainTrackingUserState" = betterproto.message_field(2)
     
     
     @dataclass
    -class MissingProfileTarget(betterproto.Message):
    -    """A005"""
    +class MergedFromState(betterproto.Message):
    +    """A004"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    profile_name: str = betterproto.string_field(2)
    -    target_name: str = betterproto.string_field(3)
    +    num_merged: int = betterproto.int32_field(1)
    +    sample: List[str] = betterproto.string_field(2)
     
     
     @dataclass
    -class InvalidVarsYAML(betterproto.Message):
    -    """A008"""
    -
    +class MergedFromStateMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "MergedFromState" = betterproto.message_field(2)
     
     
     @dataclass
    -class DbtProjectError(betterproto.Message):
    -    """A009"""
    +class MissingProfileTarget(betterproto.Message):
    +    """A005"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    profile_name: str = betterproto.string_field(1)
    +    target_name: str = betterproto.string_field(2)
     
     
     @dataclass
    -class DbtProjectErrorException(betterproto.Message):
    -    """A010"""
    -
    +class MissingProfileTargetMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "MissingProfileTarget" = betterproto.message_field(2)
     
     
     @dataclass
    -class DbtProfileError(betterproto.Message):
    -    """A011"""
    +class InvalidOptionYAML(betterproto.Message):
    +    """A008"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    option_name: str = betterproto.string_field(1)
     
     
     @dataclass
    -class DbtProfileErrorException(betterproto.Message):
    -    """A012"""
    -
    +class InvalidOptionYAMLMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "InvalidOptionYAML" = betterproto.message_field(2)
     
     
     @dataclass
    -class ProfileListTitle(betterproto.Message):
    -    """A013"""
    +class LogDbtProjectError(betterproto.Message):
    +    """A009"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    exc: str = betterproto.string_field(1)
     
     
     @dataclass
    -class ListSingleProfile(betterproto.Message):
    -    """A014"""
    -
    +class LogDbtProjectErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    profile: str = betterproto.string_field(2)
    +    data: "LogDbtProjectError" = betterproto.message_field(2)
     
     
     @dataclass
    -class NoDefinedProfiles(betterproto.Message):
    -    """A015"""
    +class LogDbtProfileError(betterproto.Message):
    +    """A011"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    exc: str = betterproto.string_field(1)
    +    profiles: List[str] = betterproto.string_field(2)
     
     
     @dataclass
    -class ProfileHelpMessage(betterproto.Message):
    -    """A016"""
    -
    +class LogDbtProfileErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "LogDbtProfileError" = betterproto.message_field(2)
     
     
     @dataclass
     class StarterProjectPath(betterproto.Message):
         """A017"""
     
    +    dir: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class StarterProjectPathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dir: str = betterproto.string_field(2)
    +    data: "StarterProjectPath" = betterproto.message_field(2)
     
     
     @dataclass
     class ConfigFolderDirectory(betterproto.Message):
         """A018"""
     
    +    dir: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ConfigFolderDirectoryMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dir: str = betterproto.string_field(2)
    +    data: "ConfigFolderDirectory" = betterproto.message_field(2)
     
     
     @dataclass
     class NoSampleProfileFound(betterproto.Message):
         """A019"""
     
    +    adapter: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class NoSampleProfileFoundMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    adapter: str = betterproto.string_field(2)
    +    data: "NoSampleProfileFound" = betterproto.message_field(2)
     
     
     @dataclass
     class ProfileWrittenWithSample(betterproto.Message):
         """A020"""
     
    +    name: str = betterproto.string_field(1)
    +    path: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ProfileWrittenWithSampleMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    name: str = betterproto.string_field(2)
    -    path: str = betterproto.string_field(3)
    +    data: "ProfileWrittenWithSample" = betterproto.message_field(2)
     
     
     @dataclass
     class ProfileWrittenWithTargetTemplateYAML(betterproto.Message):
         """A021"""
     
    +    name: str = betterproto.string_field(1)
    +    path: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ProfileWrittenWithTargetTemplateYAMLMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    name: str = betterproto.string_field(2)
    -    path: str = betterproto.string_field(3)
    +    data: "ProfileWrittenWithTargetTemplateYAMLMsg" = betterproto.message_field(2)
     
     
     @dataclass
     class ProfileWrittenWithProjectTemplateYAML(betterproto.Message):
         """A022"""
     
    +    name: str = betterproto.string_field(1)
    +    path: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ProfileWrittenWithProjectTemplateYAMLMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    name: str = betterproto.string_field(2)
    -    path: str = betterproto.string_field(3)
    +    data: "ProfileWrittenWithProjectTemplateYAML" = betterproto.message_field(2)
     
     
     @dataclass
     class SettingUpProfile(betterproto.Message):
         """A023"""
     
    +    pass
    +
    +
    +@dataclass
    +class SettingUpProfileMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "SettingUpProfile" = betterproto.message_field(2)
     
     
     @dataclass
     class InvalidProfileTemplateYAML(betterproto.Message):
         """A024"""
     
    +    pass
    +
    +
    +@dataclass
    +class InvalidProfileTemplateYAMLMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "InvalidProfileTemplateYAML" = betterproto.message_field(2)
     
     
     @dataclass
     class ProjectNameAlreadyExists(betterproto.Message):
         """A025"""
     
    +    name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ProjectNameAlreadyExistsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    name: str = betterproto.string_field(2)
    +    data: "ProjectNameAlreadyExists" = betterproto.message_field(2)
     
     
     @dataclass
     class ProjectCreated(betterproto.Message):
         """A026"""
     
    +    project_name: str = betterproto.string_field(1)
    +    docs_url: str = betterproto.string_field(2)
    +    slack_url: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class ProjectCreatedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    project_name: str = betterproto.string_field(2)
    -    docs_url: str = betterproto.string_field(3)
    -    slack_url: str = betterproto.string_field(4)
    +    data: "ProjectCreated" = betterproto.message_field(2)
     
     
     @dataclass
     class PackageRedirectDeprecation(betterproto.Message):
         """D001"""
     
    +    old_name: str = betterproto.string_field(1)
    +    new_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class PackageRedirectDeprecationMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    old_name: str = betterproto.string_field(2)
    -    new_name: str = betterproto.string_field(3)
    +    data: "PackageRedirectDeprecation" = betterproto.message_field(2)
     
     
     @dataclass
     class PackageInstallPathDeprecation(betterproto.Message):
         """D002"""
     
    +    pass
    +
    +
    +@dataclass
    +class PackageInstallPathDeprecationMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "PackageInstallPathDeprecation" = betterproto.message_field(2)
     
     
     @dataclass
     class ConfigSourcePathDeprecation(betterproto.Message):
         """D003"""
     
    +    deprecated_path: str = betterproto.string_field(1)
    +    exp_path: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ConfigSourcePathDeprecationMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    deprecated_path: str = betterproto.string_field(2)
    -    exp_path: str = betterproto.string_field(3)
    +    data: "ConfigSourcePathDeprecation" = betterproto.message_field(2)
     
     
     @dataclass
     class ConfigDataPathDeprecation(betterproto.Message):
         """D004"""
     
    +    deprecated_path: str = betterproto.string_field(1)
    +    exp_path: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ConfigDataPathDeprecationMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    deprecated_path: str = betterproto.string_field(2)
    -    exp_path: str = betterproto.string_field(3)
    +    data: "ConfigDataPathDeprecation" = betterproto.message_field(2)
     
     
     @dataclass
     class AdapterDeprecationWarning(betterproto.Message):
         """D005"""
     
    +    old_name: str = betterproto.string_field(1)
    +    new_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class AdapterDeprecationWarningMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    old_name: str = betterproto.string_field(2)
    -    new_name: str = betterproto.string_field(3)
    +    data: "AdapterDeprecationWarning" = betterproto.message_field(2)
     
     
     @dataclass
     class MetricAttributesRenamed(betterproto.Message):
         """D006"""
     
    +    metric_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class MetricAttributesRenamedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    metric_name: str = betterproto.string_field(2)
    +    data: "MetricAttributesRenamed" = betterproto.message_field(2)
     
     
     @dataclass
     class ExposureNameDeprecation(betterproto.Message):
         """D007"""
     
    +    exposure: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ExposureNameDeprecationMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "ExposureNameDeprecation" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class InternalDeprecation(betterproto.Message):
    +    """D008"""
    +
    +    name: str = betterproto.string_field(1)
    +    reason: str = betterproto.string_field(2)
    +    suggested_action: str = betterproto.string_field(3)
    +    version: str = betterproto.string_field(4)
    +
    +
    +@dataclass
    +class InternalDeprecationMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exposure: str = betterproto.string_field(2)
    +    data: "InternalDeprecation" = betterproto.message_field(2)
     
     
     @dataclass
     class AdapterEventDebug(betterproto.Message):
         """E001"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    name: str = betterproto.string_field(2)
    +    base_msg: str = betterproto.string_field(3)
    +    args: List[str] = betterproto.string_field(4)
    +
    +
    +@dataclass
    +class AdapterEventDebugMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    name: str = betterproto.string_field(3)
    -    base_msg: str = betterproto.string_field(4)
    -    args: List[str] = betterproto.string_field(5)
    +    data: "AdapterEventDebug" = betterproto.message_field(2)
     
     
     @dataclass
     class AdapterEventInfo(betterproto.Message):
         """E002"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    name: str = betterproto.string_field(2)
    +    base_msg: str = betterproto.string_field(3)
    +    args: List[str] = betterproto.string_field(4)
    +
    +
    +@dataclass
    +class AdapterEventInfoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    name: str = betterproto.string_field(3)
    -    base_msg: str = betterproto.string_field(4)
    -    args: List[str] = betterproto.string_field(5)
    +    data: "AdapterEventInfo" = betterproto.message_field(2)
     
     
     @dataclass
     class AdapterEventWarning(betterproto.Message):
         """E003"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    name: str = betterproto.string_field(2)
    +    base_msg: str = betterproto.string_field(3)
    +    args: List[str] = betterproto.string_field(4)
    +
    +
    +@dataclass
    +class AdapterEventWarningMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    name: str = betterproto.string_field(3)
    -    base_msg: str = betterproto.string_field(4)
    -    args: List[str] = betterproto.string_field(5)
    +    data: "AdapterEventWarning" = betterproto.message_field(2)
     
     
     @dataclass
     class AdapterEventError(betterproto.Message):
         """E004"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    name: str = betterproto.string_field(2)
    +    base_msg: str = betterproto.string_field(3)
    +    args: List[str] = betterproto.string_field(4)
    +    exc_info: str = betterproto.string_field(5)
    +
    +
    +@dataclass
    +class AdapterEventErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    name: str = betterproto.string_field(3)
    -    base_msg: str = betterproto.string_field(4)
    -    args: List[str] = betterproto.string_field(5)
    -    exc_info: str = betterproto.string_field(6)
    +    data: "AdapterEventError" = betterproto.message_field(2)
     
     
     @dataclass
     class NewConnection(betterproto.Message):
         """E005"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_type: str = betterproto.string_field(2)
    +    conn_name: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class NewConnectionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_type: str = betterproto.string_field(3)
    -    conn_name: str = betterproto.string_field(4)
    +    data: "NewConnection" = betterproto.message_field(2)
     
     
     @dataclass
     class ConnectionReused(betterproto.Message):
         """E006"""
     
    +    conn_name: str = betterproto.string_field(1)
    +    orig_conn_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ConnectionReusedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    conn_name: str = betterproto.string_field(2)
    +    data: "ConnectionReused" = betterproto.message_field(2)
     
     
     @dataclass
     class ConnectionLeftOpenInCleanup(betterproto.Message):
         """E007"""
     
    +    conn_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ConnectionLeftOpenInCleanupMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    conn_name: str = betterproto.string_field(2)
    +    data: "ConnectionLeftOpen" = betterproto.message_field(2)
     
     
     @dataclass
     class ConnectionClosedInCleanup(betterproto.Message):
         """E008"""
     
    +    conn_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ConnectionClosedInCleanupMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    conn_name: str = betterproto.string_field(2)
    +    data: "ConnectionClosedInCleanup" = betterproto.message_field(2)
     
     
     @dataclass
     class RollbackFailed(betterproto.Message):
         """E009"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_name: str = betterproto.string_field(2)
    +    exc_info: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class RollbackFailedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_name: str = betterproto.string_field(3)
    -    exc_info: str = betterproto.string_field(4)
    +    data: "RollbackFailed" = betterproto.message_field(2)
     
     
     @dataclass
     class ConnectionClosed(betterproto.Message):
         """E010"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ConnectionClosedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_name: str = betterproto.string_field(3)
    +    data: "ConnectionClosed" = betterproto.message_field(2)
     
     
     @dataclass
     class ConnectionLeftOpen(betterproto.Message):
         """E011"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class ConnectionLeftOpenMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_name: str = betterproto.string_field(3)
    +    data: "ConnectionLeftOpen" = betterproto.message_field(2)
     
     
     @dataclass
     class Rollback(betterproto.Message):
         """E012"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class RollbackMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_name: str = betterproto.string_field(3)
    +    data: "Rollback" = betterproto.message_field(2)
     
     
     @dataclass
     class CacheMiss(betterproto.Message):
         """E013"""
     
    +    conn_name: str = betterproto.string_field(1)
    +    database: str = betterproto.string_field(2)
    +    schema: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class CacheMissMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    conn_name: str = betterproto.string_field(2)
    -    database: str = betterproto.string_field(3)
    -    schema: str = betterproto.string_field(4)
    +    data: "CacheMiss" = betterproto.message_field(2)
     
     
     @dataclass
     class ListRelations(betterproto.Message):
         """E014"""
     
    +    database: str = betterproto.string_field(1)
    +    schema: str = betterproto.string_field(2)
    +    relations: List["ReferenceKeyMsg"] = betterproto.message_field(3)
    +
    +
    +@dataclass
    +class ListRelationsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    database: str = betterproto.string_field(2)
    -    schema: str = betterproto.string_field(3)
    -    relations: List["ReferenceKeyMsg"] = betterproto.message_field(4)
    +    data: "ListRelations" = betterproto.message_field(2)
     
     
     @dataclass
     class ConnectionUsed(betterproto.Message):
         """E015"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_type: str = betterproto.string_field(2)
    +    conn_name: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class ConnectionUsedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_type: str = betterproto.string_field(3)
    -    conn_name: str = betterproto.string_field(4)
    +    data: "ConnectionUsed" = betterproto.message_field(2)
     
     
     @dataclass
     class SQLQuery(betterproto.Message):
         """E016"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_name: str = betterproto.string_field(2)
    +    sql: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class SQLQueryMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_name: str = betterproto.string_field(3)
    -    sql: str = betterproto.string_field(4)
    +    data: "SQLQuery" = betterproto.message_field(2)
     
     
     @dataclass
     class SQLQueryStatus(betterproto.Message):
         """E017"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    status: str = betterproto.string_field(2)
    +    elapsed: float = betterproto.float_field(3)
    +
    +
    +@dataclass
    +class SQLQueryStatusMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    status: str = betterproto.string_field(3)
    -    elapsed: float = betterproto.float_field(4)
    +    data: "SQLQueryStatus" = betterproto.message_field(2)
     
     
     @dataclass
     class SQLCommit(betterproto.Message):
         """E018"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    conn_name: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class SQLCommitMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    conn_name: str = betterproto.string_field(3)
    +    data: "SQLCommit" = betterproto.message_field(2)
     
     
     @dataclass
     class ColTypeChange(betterproto.Message):
         """E019"""
     
    +    orig_type: str = betterproto.string_field(1)
    +    new_type: str = betterproto.string_field(2)
    +    table: "ReferenceKeyMsg" = betterproto.message_field(3)
    +
    +
    +@dataclass
    +class ColTypeChangeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    orig_type: str = betterproto.string_field(2)
    -    new_type: str = betterproto.string_field(3)
    -    table: "ReferenceKeyMsg" = betterproto.message_field(4)
    +    data: "ColTypeChange" = betterproto.message_field(2)
     
     
     @dataclass
     class SchemaCreation(betterproto.Message):
         """E020"""
     
    +    relation: "ReferenceKeyMsg" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class SchemaCreationMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    data: "SchemaCreation" = betterproto.message_field(2)
     
     
     @dataclass
     class SchemaDrop(betterproto.Message):
         """E021"""
     
    +    relation: "ReferenceKeyMsg" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class SchemaDropMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    data: "SchemaDrop" = betterproto.message_field(2)
     
     
     @dataclass
    -class UncachedRelation(betterproto.Message):
    +class CacheAction(betterproto.Message):
         """E022"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    dep_key: "ReferenceKeyMsg" = betterproto.message_field(2)
    -    ref_key: "ReferenceKeyMsg" = betterproto.message_field(3)
    +    action: str = betterproto.string_field(1)
    +    ref_key: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    ref_key_2: "ReferenceKeyMsg" = betterproto.message_field(3)
    +    ref_key_3: "ReferenceKeyMsg" = betterproto.message_field(4)
    +    ref_list: List["ReferenceKeyMsg"] = betterproto.message_field(5)
     
     
     @dataclass
    -class AddLink(betterproto.Message):
    -    """E023"""
    -
    +class CacheActionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dep_key: "ReferenceKeyMsg" = betterproto.message_field(2)
    -    ref_key: "ReferenceKeyMsg" = betterproto.message_field(3)
    +    data: "CacheAction" = betterproto.message_field(2)
     
     
     @dataclass
    -class AddRelation(betterproto.Message):
    -    """E024"""
    +class CacheDumpGraph(betterproto.Message):
    +    """E031"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
    +        1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    +    )
    +    before_after: str = betterproto.string_field(2)
    +    action: str = betterproto.string_field(3)
     
     
     @dataclass
    -class DropMissingRelation(betterproto.Message):
    -    """E025"""
    -
    +class CacheDumpGraphMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    data: "CacheDumpGraph" = betterproto.message_field(2)
     
     
     @dataclass
    -class DropCascade(betterproto.Message):
    -    """E026"""
    +class AdapterImportError(betterproto.Message):
    +    """E035"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    dropped: "ReferenceKeyMsg" = betterproto.message_field(2)
    -    consequences: List["ReferenceKeyMsg"] = betterproto.message_field(3)
    +    exc: str = betterproto.string_field(1)
     
     
     @dataclass
    -class DropRelation(betterproto.Message):
    -    """E027"""
    -
    +class AdapterImportErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dropped: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    data: "AdapterImportError" = betterproto.message_field(2)
     
     
     @dataclass
    -class UpdateReference(betterproto.Message):
    -    """E028"""
    +class PluginLoadError(betterproto.Message):
    +    """E036"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    old_key: "ReferenceKeyMsg" = betterproto.message_field(2)
    -    new_key: "ReferenceKeyMsg" = betterproto.message_field(3)
    -    cached_key: "ReferenceKeyMsg" = betterproto.message_field(4)
    +    exc_info: str = betterproto.string_field(1)
     
     
     @dataclass
    -class TemporaryRelation(betterproto.Message):
    -    """E029"""
    -
    +class PluginLoadErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    key: "ReferenceKeyMsg" = betterproto.message_field(2)
    +    data: "PluginLoadError" = betterproto.message_field(2)
     
     
     @dataclass
    -class RenameSchema(betterproto.Message):
    -    """E030"""
    +class NewConnectionOpening(betterproto.Message):
    +    """E037"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    old_key: "ReferenceKeyMsg" = betterproto.message_field(2)
    -    new_key: "ReferenceKeyMsg" = betterproto.message_field(3)
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    connection_state: str = betterproto.string_field(2)
     
     
     @dataclass
    -class DumpBeforeAddGraph(betterproto.Message):
    -    """E031"""
    -
    +class NewConnectionOpeningMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    -    )
    +    data: "NewConnectionOpening" = betterproto.message_field(2)
     
     
     @dataclass
    -class DumpAfterAddGraph(betterproto.Message):
    -    """E032"""
    +class CodeExecution(betterproto.Message):
    +    """E038"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    -    )
    +    conn_name: str = betterproto.string_field(1)
    +    code_content: str = betterproto.string_field(2)
     
     
     @dataclass
    -class DumpBeforeRenameSchema(betterproto.Message):
    -    """E033"""
    -
    +class CodeExecutionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    -    )
    +    data: "CodeExecution" = betterproto.message_field(2)
     
     
     @dataclass
    -class DumpAfterRenameSchema(betterproto.Message):
    -    """E034"""
    +class CodeExecutionStatus(betterproto.Message):
    +    """E039"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
    -    )
    +    status: str = betterproto.string_field(1)
    +    elapsed: float = betterproto.float_field(2)
     
     
     @dataclass
    -class AdapterImportError(betterproto.Message):
    -    """E035"""
    -
    +class CodeExecutionStatusMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "CodeExecutionStatus" = betterproto.message_field(2)
     
     
     @dataclass
    -class PluginLoadError(betterproto.Message):
    -    """E036"""
    -
    -    info: "EventInfo" = betterproto.message_field(1)
    -    exc_info: str = betterproto.string_field(2)
    -
    -
    -@dataclass
    -class NewConnectionOpening(betterproto.Message):
    -    """E037"""
    +class CatalogGenerationError(betterproto.Message):
    +    """E040"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    connection_state: str = betterproto.string_field(3)
    +    exc: str = betterproto.string_field(1)
     
     
     @dataclass
    -class CodeExecution(betterproto.Message):
    -    """E038"""
    -
    +class CatalogGenerationErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    conn_name: str = betterproto.string_field(2)
    -    code_content: str = betterproto.string_field(3)
    +    data: "CatalogGenerationError" = betterproto.message_field(2)
     
     
     @dataclass
    -class CodeExecutionStatus(betterproto.Message):
    -    """E039"""
    -
    -    info: "EventInfo" = betterproto.message_field(1)
    -    status: str = betterproto.string_field(2)
    -    elapsed: float = betterproto.float_field(3)
    -
    -
    -@dataclass
    -class CatalogGenerationError(betterproto.Message):
    -    """E040"""
    +class WriteCatalogFailure(betterproto.Message):
    +    """E041"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    num_exceptions: int = betterproto.int32_field(1)
     
     
     @dataclass
    -class WriteCatalogFailure(betterproto.Message):
    -    """E041"""
    -
    +class WriteCatalogFailureMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    num_exceptions: int = betterproto.int32_field(2)
    +    data: "WriteCatalogFailure" = betterproto.message_field(2)
     
     
     @dataclass
     class CatalogWritten(betterproto.Message):
         """E042"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    path: str = betterproto.string_field(1)
     
     
     @dataclass
    -class CannotGenerateDocs(betterproto.Message):
    -    """E043"""
    -
    +class CatalogWrittenMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "CatalogWritten" = betterproto.message_field(2)
     
     
     @dataclass
    -class BuildingCatalog(betterproto.Message):
    -    """E044"""
    +class CannotGenerateDocs(betterproto.Message):
    +    """E043"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    pass
     
     
     @dataclass
    -class DatabaseErrorRunningHook(betterproto.Message):
    -    """E045"""
    -
    +class CannotGenerateDocsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    hook_type: str = betterproto.string_field(2)
    +    data: "CannotGenerateDocs" = betterproto.message_field(2)
     
     
     @dataclass
    -class HooksRunning(betterproto.Message):
    -    """E046"""
    +class BuildingCatalog(betterproto.Message):
    +    """E044"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    num_hooks: int = betterproto.int32_field(2)
    -    hook_type: str = betterproto.string_field(3)
    +    pass
     
     
     @dataclass
    -class HookFinished(betterproto.Message):
    -    """E047"""
    -
    +class BuildingCatalogMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    stat_line: str = betterproto.string_field(2)
    -    execution: str = betterproto.string_field(3)
    -    execution_time: float = betterproto.float_field(4)
    +    data: "BuildingCatalog" = betterproto.message_field(2)
     
     
     @dataclass
    -class ParseCmdStart(betterproto.Message):
    -    """I001"""
    +class DatabaseErrorRunningHook(betterproto.Message):
    +    """E045"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    hook_type: str = betterproto.string_field(1)
     
     
     @dataclass
    -class ParseCmdCompiling(betterproto.Message):
    -    """I002"""
    -
    +class DatabaseErrorRunningHookMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "DatabaseErrorRunningHook" = betterproto.message_field(2)
     
     
     @dataclass
    -class ParseCmdWritingManifest(betterproto.Message):
    -    """I003"""
    +class HooksRunning(betterproto.Message):
    +    """E046"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    num_hooks: int = betterproto.int32_field(1)
    +    hook_type: str = betterproto.string_field(2)
     
     
     @dataclass
    -class ParseCmdDone(betterproto.Message):
    -    """I004"""
    -
    +class HooksRunningMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "HooksRunning" = betterproto.message_field(2)
     
     
     @dataclass
    -class ManifestDependenciesLoaded(betterproto.Message):
    -    """I005"""
    +class FinishedRunningStats(betterproto.Message):
    +    """E047"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    stat_line: str = betterproto.string_field(1)
    +    execution: str = betterproto.string_field(2)
    +    execution_time: float = betterproto.float_field(3)
     
     
     @dataclass
    -class ManifestLoaderCreated(betterproto.Message):
    -    """I006"""
    -
    +class FinishedRunningStatsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "FinishedRunningStats" = betterproto.message_field(2)
     
     
     @dataclass
    -class ManifestLoaded(betterproto.Message):
    -    """I007"""
    +class ParseCmdOut(betterproto.Message):
    +    """I001"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    msg: str = betterproto.string_field(1)
     
     
     @dataclass
    -class ManifestChecked(betterproto.Message):
    -    """I008"""
    -
    +class ParseCmdOutMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "ParseCmdOut" = betterproto.message_field(2)
     
     
     @dataclass
    -class ManifestFlatGraphBuilt(betterproto.Message):
    -    """I009"""
    +class ParseCmdPerfInfoPath(betterproto.Message):
    +    """I010"""
     
         info: "EventInfo" = betterproto.message_field(1)
    +    path: str = betterproto.string_field(2)
     
     
     @dataclass
    -class ParseCmdPerfInfoPath(betterproto.Message):
    +class ParseCmdPerfInfoPathMsg(betterproto.Message):
         """I010"""
     
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "ParseCmdPerfInfoPath" = betterproto.message_field(2)
     
     
     @dataclass
     class GenericTestFileParse(betterproto.Message):
         """I011"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class GenericTestFileParseMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "GenericTestFileParse" = betterproto.message_field(2)
     
     
     @dataclass
     class MacroFileParse(betterproto.Message):
         """I012"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    path: str = betterproto.string_field(1)
     
     
     @dataclass
    -class PartialParsingFullReparseBecauseOfError(betterproto.Message):
    -    """I013"""
    -
    +class MacroFileParseMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "MacroFileParse" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingExceptionFile(betterproto.Message):
    +class PartialParsingErrorProcessingFile(betterproto.Message):
         """I014"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    file: str = betterproto.string_field(2)
    +    file: str = betterproto.string_field(1)
     
     
     @dataclass
    -class PartialParsingFile(betterproto.Message):
    -    """I015"""
    -
    +class PartialParsingErrorProcessingFileMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    data: "PartialParsingErrorProcessingFile" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingException(betterproto.Message):
    +class PartialParsingError(betterproto.Message):
         """I016"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
         exc_info: Dict[str, str] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    +        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
         )
     
     
     @dataclass
    -class PartialParsingSkipParsing(betterproto.Message):
    -    """I017"""
    -
    +class PartialParsingErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "PartialParsingError" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingMacroChangeStartFullParse(betterproto.Message):
    -    """I018"""
    +class PartialParsingSkipParsing(betterproto.Message):
    +    """I017"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    pass
     
     
     @dataclass
    -class PartialParsingProjectEnvVarsChanged(betterproto.Message):
    -    """I019"""
    -
    +class PartialParsingSkipParsingMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "PartialParsingSkipParsing" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingProfileEnvVarsChanged(betterproto.Message):
    -    """I020"""
    +class UnableToPartialParse(betterproto.Message):
    +    """I024"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    reason: str = betterproto.string_field(1)
     
     
     @dataclass
    -class PartialParsingDeletedMetric(betterproto.Message):
    -    """I021"""
    -
    +class UnableToPartialParseMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    unique_id: str = betterproto.string_field(2)
    +    data: "UnableToPartialParse" = betterproto.message_field(2)
     
     
     @dataclass
    -class ManifestWrongMetadataVersion(betterproto.Message):
    -    """I022"""
    +class StateCheckVarsHash(betterproto.Message):
    +    """I025"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    version: str = betterproto.string_field(2)
    +    checksum: str = betterproto.string_field(1)
    +    vars: str = betterproto.string_field(2)
    +    profile: str = betterproto.string_field(3)
    +    target: str = betterproto.string_field(4)
    +    version: str = betterproto.string_field(5)
     
     
     @dataclass
    -class PartialParsingVersionMismatch(betterproto.Message):
    -    """I023"""
    -
    +class StateCheckVarsHashMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    saved_version: str = betterproto.string_field(2)
    -    current_version: str = betterproto.string_field(3)
    +    data: "StateCheckVarsHash" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingFailedBecauseConfigChange(betterproto.Message):
    -    """I024"""
    +class PartialParsingNotEnabled(betterproto.Message):
    +    """I028"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    pass
     
     
     @dataclass
    -class PartialParsingFailedBecauseProfileChange(betterproto.Message):
    -    """I025"""
    -
    +class PartialParsingNotEnabledMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "PartialParsingNotEnabled" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingFailedBecauseNewProjectDependency(betterproto.Message):
    -    """I026"""
    +class ParsedFileLoadFailed(betterproto.Message):
    +    """I029"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    path: str = betterproto.string_field(1)
    +    exc: str = betterproto.string_field(2)
    +    exc_info: str = betterproto.string_field(3)
     
     
     @dataclass
    -class PartialParsingFailedBecauseHashChanged(betterproto.Message):
    -    """I027"""
    -
    +class ParsedFileLoadFailedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "ParsedFileLoadFailed" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingNotEnabled(betterproto.Message):
    -    """I028"""
    +class PartialParsingEnabled(betterproto.Message):
    +    """I040"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    deleted: int = betterproto.int32_field(1)
    +    added: int = betterproto.int32_field(2)
    +    changed: int = betterproto.int32_field(3)
     
     
     @dataclass
    -class ParsedFileLoadFailed(betterproto.Message):
    -    """I029"""
    -
    +class PartialParsingEnabledMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    -    exc: str = betterproto.string_field(3)
    -    exc_info: str = betterproto.string_field(4)
    +    data: "PartialParsingEnabled" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParseSaveFileNotFound(betterproto.Message):
    -    """I030"""
    +class PartialParsingFile(betterproto.Message):
    +    """I041"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    +    file_id: str = betterproto.string_field(1)
    +    operation: str = betterproto.string_field(2)
     
     
     @dataclass
    -class StaticParserCausedJinjaRendering(betterproto.Message):
    -    """I031"""
    -
    +class PartialParsingFileMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "PartialParsingFile" = betterproto.message_field(2)
     
     
     @dataclass
    -class UsingExperimentalParser(betterproto.Message):
    -    """I032"""
    +class InvalidDisabledTargetInTestNode(betterproto.Message):
    +    """I050"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    resource_type_title: str = betterproto.string_field(1)
    +    unique_id: str = betterproto.string_field(2)
    +    original_file_path: str = betterproto.string_field(3)
    +    target_kind: str = betterproto.string_field(4)
    +    target_name: str = betterproto.string_field(5)
    +    target_package: str = betterproto.string_field(6)
     
     
     @dataclass
    -class SampleFullJinjaRendering(betterproto.Message):
    -    """I033"""
    -
    +class InvalidDisabledTargetInTestNodeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "InvalidDisabledTargetInTestNode" = betterproto.message_field(2)
     
     
     @dataclass
    -class StaticParserFallbackJinjaRendering(betterproto.Message):
    -    """I034"""
    +class UnusedResourceConfigPath(betterproto.Message):
    +    """I051"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    unused_config_paths: List[str] = betterproto.string_field(1)
     
     
     @dataclass
    -class StaticParsingMacroOverrideDetected(betterproto.Message):
    -    """I035"""
    -
    +class UnusedResourceConfigPathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "UnusedResourceConfigPath" = betterproto.message_field(2)
     
     
     @dataclass
    -class StaticParserSuccess(betterproto.Message):
    -    """I036"""
    +class SeedIncreased(betterproto.Message):
    +    """I052"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    package_name: str = betterproto.string_field(1)
    +    name: str = betterproto.string_field(2)
     
     
     @dataclass
    -class StaticParserFailure(betterproto.Message):
    -    """I037"""
    -
    +class SeedIncreasedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "SeedIncreased" = betterproto.message_field(2)
     
     
     @dataclass
    -class ExperimentalParserSuccess(betterproto.Message):
    -    """I038"""
    +class SeedExceedsLimitSamePath(betterproto.Message):
    +    """I053"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    package_name: str = betterproto.string_field(1)
    +    name: str = betterproto.string_field(2)
     
     
     @dataclass
    -class ExperimentalParserFailure(betterproto.Message):
    -    """I039"""
    -
    +class SeedExceedsLimitSamePathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "SeedExceedsLimitSamePath" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingEnabled(betterproto.Message):
    -    """I040"""
    +class SeedExceedsLimitAndPathChanged(betterproto.Message):
    +    """I054"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    deleted: int = betterproto.int32_field(2)
    -    added: int = betterproto.int32_field(3)
    -    changed: int = betterproto.int32_field(4)
    +    package_name: str = betterproto.string_field(1)
    +    name: str = betterproto.string_field(2)
     
     
     @dataclass
    -class PartialParsingAddedFile(betterproto.Message):
    -    """I041"""
    -
    +class SeedExceedsLimitAndPathChangedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    data: "SeedExceedsLimitAndPathChanged" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingDeletedFile(betterproto.Message):
    -    """I042"""
    +class SeedExceedsLimitChecksumChanged(betterproto.Message):
    +    """I055"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    package_name: str = betterproto.string_field(1)
    +    name: str = betterproto.string_field(2)
    +    checksum_name: str = betterproto.string_field(3)
     
     
     @dataclass
    -class PartialParsingUpdatedFile(betterproto.Message):
    -    """I043"""
    -
    +class SeedExceedsLimitChecksumChangedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    data: "SeedExceedsLimitChecksumChanged" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingNodeMissingInSourceFile(betterproto.Message):
    -    """I044"""
    +class UnusedTables(betterproto.Message):
    +    """I056"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    unused_tables: List[str] = betterproto.string_field(1)
     
     
     @dataclass
    -class PartialParsingMissingNodes(betterproto.Message):
    -    """I045"""
    -
    +class UnusedTablesMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    data: "UnusedTables" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingChildMapMissingUniqueID(betterproto.Message):
    -    """I046"""
    +class WrongResourceSchemaFile(betterproto.Message):
    +    """I057"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    unique_id: str = betterproto.string_field(2)
    +    patch_name: str = betterproto.string_field(1)
    +    resource_type: str = betterproto.string_field(2)
    +    plural_resource_type: str = betterproto.string_field(3)
    +    yaml_key: str = betterproto.string_field(4)
    +    file_path: str = betterproto.string_field(5)
     
     
     @dataclass
    -class PartialParsingUpdateSchemaFile(betterproto.Message):
    -    """I047"""
    -
    +class WrongResourceSchemaFileMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    file_id: str = betterproto.string_field(2)
    +    data: "WrongResourceSchemaFile" = betterproto.message_field(2)
     
     
     @dataclass
    -class PartialParsingDeletedSource(betterproto.Message):
    -    """I048"""
    +class NoNodeForYamlKey(betterproto.Message):
    +    """I058"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    unique_id: str = betterproto.string_field(2)
    +    patch_name: str = betterproto.string_field(1)
    +    yaml_key: str = betterproto.string_field(2)
    +    file_path: str = betterproto.string_field(3)
     
     
     @dataclass
    -class PartialParsingDeletedExposure(betterproto.Message):
    -    """I049"""
    -
    +class NoNodeForYamlKeyMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    unique_id: str = betterproto.string_field(2)
    +    data: "NoNodeForYamlKey" = betterproto.message_field(2)
     
     
     @dataclass
    -class InvalidDisabledTargetInTestNode(betterproto.Message):
    -    """I050"""
    +class MacroNotFoundForPatch(betterproto.Message):
    +    """I059"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    resource_type_title: str = betterproto.string_field(2)
    -    unique_id: str = betterproto.string_field(3)
    -    original_file_path: str = betterproto.string_field(4)
    -    target_kind: str = betterproto.string_field(5)
    -    target_name: str = betterproto.string_field(6)
    -    target_package: str = betterproto.string_field(7)
    +    patch_name: str = betterproto.string_field(1)
     
     
     @dataclass
    -class UnusedResourceConfigPath(betterproto.Message):
    -    """I051"""
    -
    +class MacroNotFoundForPatchMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    unused_config_paths: List[str] = betterproto.string_field(2)
    +    data: "MacroNotFoundForPatch" = betterproto.message_field(2)
     
     
     @dataclass
    -class SeedIncreased(betterproto.Message):
    -    """I052"""
    +class NodeNotFoundOrDisabled(betterproto.Message):
    +    """I060"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    package_name: str = betterproto.string_field(2)
    -    name: str = betterproto.string_field(3)
    +    original_file_path: str = betterproto.string_field(1)
    +    unique_id: str = betterproto.string_field(2)
    +    resource_type_title: str = betterproto.string_field(3)
    +    target_name: str = betterproto.string_field(4)
    +    target_kind: str = betterproto.string_field(5)
    +    target_package: str = betterproto.string_field(6)
    +    disabled: str = betterproto.string_field(7)
     
     
     @dataclass
    -class SeedExceedsLimitSamePath(betterproto.Message):
    -    """I053"""
    -
    +class NodeNotFoundOrDisabledMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    package_name: str = betterproto.string_field(2)
    -    name: str = betterproto.string_field(3)
    +    data: "NodeNotFoundOrDisabled" = betterproto.message_field(2)
     
     
     @dataclass
    -class SeedExceedsLimitAndPathChanged(betterproto.Message):
    -    """I054"""
    +class JinjaLogWarning(betterproto.Message):
    +    """I061"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    package_name: str = betterproto.string_field(2)
    -    name: str = betterproto.string_field(3)
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    msg: str = betterproto.string_field(2)
     
     
     @dataclass
    -class SeedExceedsLimitChecksumChanged(betterproto.Message):
    -    """I055"""
    -
    +class JinjaLogWarningMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    package_name: str = betterproto.string_field(2)
    -    name: str = betterproto.string_field(3)
    -    checksum_name: str = betterproto.string_field(4)
    +    data: "JinjaLogWarning" = betterproto.message_field(2)
     
     
     @dataclass
    -class UnusedTables(betterproto.Message):
    -    """I056"""
    +class JinjaLogInfo(betterproto.Message):
    +    """I062"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    unused_tables: List[str] = betterproto.string_field(2)
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    msg: str = betterproto.string_field(2)
     
     
     @dataclass
    -class WrongResourceSchemaFile(betterproto.Message):
    -    """I057"""
    -
    +class JinjaLogInfoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    patch_name: str = betterproto.string_field(2)
    -    resource_type: str = betterproto.string_field(3)
    -    plural_resource_type: str = betterproto.string_field(4)
    -    yaml_key: str = betterproto.string_field(5)
    -    file_path: str = betterproto.string_field(6)
    +    data: "JinjaLogInfo" = betterproto.message_field(2)
     
     
     @dataclass
    -class NoNodeForYamlKey(betterproto.Message):
    -    """I058"""
    +class JinjaLogDebug(betterproto.Message):
    +    """I063"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    patch_name: str = betterproto.string_field(2)
    -    yaml_key: str = betterproto.string_field(3)
    -    file_path: str = betterproto.string_field(4)
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    msg: str = betterproto.string_field(2)
     
     
     @dataclass
    -class MacroPatchNotFound(betterproto.Message):
    -    """I059"""
    -
    +class JinjaLogDebugMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    patch_name: str = betterproto.string_field(2)
    +    data: "JinjaLogDebug" = betterproto.message_field(2)
     
     
     @dataclass
    -class NodeNotFoundOrDisabled(betterproto.Message):
    -    """I060"""
    +class GitSparseCheckoutSubdirectory(betterproto.Message):
    +    """M001"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    original_file_path: str = betterproto.string_field(2)
    -    unique_id: str = betterproto.string_field(3)
    -    resource_type_title: str = betterproto.string_field(4)
    -    target_name: str = betterproto.string_field(5)
    -    target_kind: str = betterproto.string_field(6)
    -    target_package: str = betterproto.string_field(7)
    -    disabled: str = betterproto.string_field(8)
    +    subdir: str = betterproto.string_field(1)
     
     
     @dataclass
    -class JinjaLogWarning(betterproto.Message):
    -    """I061"""
    -
    +class GitSparseCheckoutSubdirectoryMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    msg: str = betterproto.string_field(3)
    +    data: "GitSparseCheckoutSubdirectory" = betterproto.message_field(2)
     
     
     @dataclass
    -class GitSparseCheckoutSubdirectory(betterproto.Message):
    -    """M001"""
    +class GitProgressCheckoutRevision(betterproto.Message):
    +    """M002"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    subdir: str = betterproto.string_field(2)
    +    revision: str = betterproto.string_field(1)
     
     
     @dataclass
    -class GitProgressCheckoutRevision(betterproto.Message):
    -    """M002"""
    -
    +class GitProgressCheckoutRevisionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    revision: str = betterproto.string_field(2)
    +    data: "GitProgressCheckoutRevision" = betterproto.message_field(2)
     
     
     @dataclass
     class GitProgressUpdatingExistingDependency(betterproto.Message):
         """M003"""
     
    +    dir: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class GitProgressUpdatingExistingDependencyMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dir: str = betterproto.string_field(2)
    +    data: "GitProgressUpdatingExistingDependency" = betterproto.message_field(2)
     
     
     @dataclass
     class GitProgressPullingNewDependency(betterproto.Message):
         """M004"""
     
    +    dir: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class GitProgressPullingNewDependencyMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    dir: str = betterproto.string_field(2)
    +    data: "GitProgressPullingNewDependency" = betterproto.message_field(2)
     
     
     @dataclass
     class GitNothingToDo(betterproto.Message):
         """M005"""
     
    +    sha: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class GitNothingToDoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    sha: str = betterproto.string_field(2)
    +    data: "GitNothingToDo" = betterproto.message_field(2)
     
     
     @dataclass
     class GitProgressUpdatedCheckoutRange(betterproto.Message):
         """M006"""
     
    +    start_sha: str = betterproto.string_field(1)
    +    end_sha: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class GitProgressUpdatedCheckoutRangeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    start_sha: str = betterproto.string_field(2)
    -    end_sha: str = betterproto.string_field(3)
    +    data: "GitProgressUpdatedCheckoutRange" = betterproto.message_field(2)
     
     
     @dataclass
     class GitProgressCheckedOutAt(betterproto.Message):
         """M007"""
     
    +    end_sha: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class GitProgressCheckedOutAtMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    end_sha: str = betterproto.string_field(2)
    +    data: "GitProgressCheckedOutAt" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryProgressGETRequest(betterproto.Message):
         """M008"""
     
    +    url: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RegistryProgressGETRequestMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    url: str = betterproto.string_field(2)
    +    data: "RegistryProgressGETRequest" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryProgressGETResponse(betterproto.Message):
         """M009"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    url: str = betterproto.string_field(2)
    -    resp_code: int = betterproto.int32_field(3)
    +    url: str = betterproto.string_field(1)
    +    resp_code: int = betterproto.int32_field(2)
     
     
     @dataclass
    -class SelectorReportInvalidSelector(betterproto.Message):
    -    """M010"""
    -
    +class RegistryProgressGETResponseMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    valid_selectors: str = betterproto.string_field(2)
    -    spec_method: str = betterproto.string_field(3)
    -    raw_spec: str = betterproto.string_field(4)
    +    data: "RegistryProgressGETResponse" = betterproto.message_field(2)
     
     
     @dataclass
    -class JinjaLogInfo(betterproto.Message):
    -    """M011"""
    +class SelectorReportInvalidSelector(betterproto.Message):
    +    """M010"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    msg: str = betterproto.string_field(3)
    +    valid_selectors: str = betterproto.string_field(1)
    +    spec_method: str = betterproto.string_field(2)
    +    raw_spec: str = betterproto.string_field(3)
     
     
     @dataclass
    -class JinjaLogDebug(betterproto.Message):
    -    """M012"""
    -
    +class SelectorReportInvalidSelectorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    msg: str = betterproto.string_field(3)
    +    data: "SelectorReportInvalidSelector" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsNoPackagesFound(betterproto.Message):
         """M013"""
     
    +    pass
    +
    +
    +@dataclass
    +class DepsNoPackagesFoundMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "DepsNoPackagesFound" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsStartPackageInstall(betterproto.Message):
         """M014"""
     
    +    package_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DepsStartPackageInstallMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    package_name: str = betterproto.string_field(2)
    +    data: "DepsStartPackageInstall" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsInstallInfo(betterproto.Message):
         """M015"""
     
    +    version_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DepsInstallInfoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    version_name: str = betterproto.string_field(2)
    +    data: "DepsInstallInfo" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsUpdateAvailable(betterproto.Message):
         """M016"""
     
    +    version_latest: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DepsUpdateAvailableMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    version_latest: str = betterproto.string_field(2)
    +    data: "DepsUpdateAvailable" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsUpToDate(betterproto.Message):
         """M017"""
     
    +    pass
    +
    +
    +@dataclass
    +class DepsUpToDateMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "DepsUpToDate" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsListSubdirectory(betterproto.Message):
         """M018"""
     
    +    subdirectory: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DepsListSubdirectoryMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    subdirectory: str = betterproto.string_field(2)
    +    data: "DepsListSubdirectory" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsNotifyUpdatesAvailable(betterproto.Message):
         """M019"""
     
    +    packages: "ListOfStrings" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class DepsNotifyUpdatesAvailableMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    packages: "ListOfStrings" = betterproto.message_field(2)
    +    data: "DepsNotifyUpdatesAvailable" = betterproto.message_field(2)
     
     
     @dataclass
     class RetryExternalCall(betterproto.Message):
         """M020"""
     
    +    attempt: int = betterproto.int32_field(1)
    +    max: int = betterproto.int32_field(2)
    +
    +
    +@dataclass
    +class RetryExternalCallMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    attempt: int = betterproto.int32_field(2)
    -    max: int = betterproto.int32_field(3)
    +    data: "RetryExternalCall" = betterproto.message_field(2)
     
     
     @dataclass
     class RecordRetryException(betterproto.Message):
         """M021"""
     
    +    exc: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RecordRetryExceptionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "RecordRetryException" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryIndexProgressGETRequest(betterproto.Message):
         """M022"""
     
    +    url: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RegistryIndexProgressGETRequestMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    url: str = betterproto.string_field(2)
    +    data: "RegistryIndexProgressGETRequest" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryIndexProgressGETResponse(betterproto.Message):
         """M023"""
     
    +    url: str = betterproto.string_field(1)
    +    resp_code: int = betterproto.int32_field(2)
    +
    +
    +@dataclass
    +class RegistryIndexProgressGETResponseMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    url: str = betterproto.string_field(2)
    -    resp_code: int = betterproto.int32_field(3)
    +    data: "RegistryIndexProgressGETResponse" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryResponseUnexpectedType(betterproto.Message):
         """M024"""
     
    +    response: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RegistryResponseUnexpectedTypeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    response: str = betterproto.string_field(2)
    +    data: "RegistryResponseUnexpectedType" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryResponseMissingTopKeys(betterproto.Message):
         """M025"""
     
    +    response: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RegistryResponseMissingTopKeysMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    response: str = betterproto.string_field(2)
    +    data: "RegistryResponseMissingTopKeys" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryResponseMissingNestedKeys(betterproto.Message):
         """M026"""
     
    +    response: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RegistryResponseMissingNestedKeysMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    response: str = betterproto.string_field(2)
    +    data: "RegistryResponseMissingNestedKeys" = betterproto.message_field(2)
     
     
     @dataclass
     class RegistryResponseExtraNestedKeys(betterproto.Message):
         """m027"""
     
    +    response: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RegistryResponseExtraNestedKeysMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    response: str = betterproto.string_field(2)
    +    data: "RegistryResponseExtraNestedKeys" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsSetDownloadDirectory(betterproto.Message):
         """M028"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DepsSetDownloadDirectoryMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "DepsSetDownloadDirectory" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsUnpinned(betterproto.Message):
         """M029"""
     
    +    revision: str = betterproto.string_field(1)
    +    git: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class DepsUnpinnedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    revision: str = betterproto.string_field(2)
    -    git: str = betterproto.string_field(3)
    +    data: "DepsUnpinned" = betterproto.message_field(2)
     
     
     @dataclass
     class NoNodesForSelectionCriteria(betterproto.Message):
         """M030"""
     
    +    spec_raw: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class NoNodesForSelectionCriteriaMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    spec_raw: str = betterproto.string_field(2)
    +    data: "NoNodesForSelectionCriteria" = betterproto.message_field(2)
     
     
     @dataclass
     class RunningOperationCaughtError(betterproto.Message):
         """Q001"""
     
    +    exc: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RunningOperationCaughtErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "RunningOperationCaughtError" = betterproto.message_field(2)
     
     
     @dataclass
     class CompileComplete(betterproto.Message):
         """Q002"""
     
    +    pass
    +
    +
    +@dataclass
    +class CompileCompleteMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "CompileComplete" = betterproto.message_field(2)
     
     
     @dataclass
     class FreshnessCheckComplete(betterproto.Message):
         """Q003"""
     
    +    pass
    +
    +
    +@dataclass
    +class FreshnessCheckCompleteMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "FreshnessCheckComplete" = betterproto.message_field(2)
     
     
     @dataclass
     class SeedHeader(betterproto.Message):
         """Q004"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    header: str = betterproto.string_field(2)
    +    header: str = betterproto.string_field(1)
     
     
     @dataclass
    -class SeedHeaderSeparator(betterproto.Message):
    -    """Q005"""
    -
    +class SeedHeaderMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    len_header: int = betterproto.int32_field(2)
    +    data: "SeedHeader" = betterproto.message_field(2)
     
     
     @dataclass
     class SQLRunnerException(betterproto.Message):
         """Q006"""
     
    +    exc: str = betterproto.string_field(1)
    +    exc_info: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class SQLRunnerExceptionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    -    exc_info: str = betterproto.string_field(3)
    +    data: "SQLRunnerException" = betterproto.message_field(2)
     
     
     @dataclass
     class LogTestResult(betterproto.Message):
         """Q007"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    name: str = betterproto.string_field(2)
    +    status: str = betterproto.string_field(3)
    +    index: int = betterproto.int32_field(4)
    +    num_models: int = betterproto.int32_field(5)
    +    execution_time: float = betterproto.float_field(6)
    +    num_failures: int = betterproto.int32_field(7)
    +
    +
    +@dataclass
    +class LogTestResultMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    name: str = betterproto.string_field(3)
    -    status: str = betterproto.string_field(4)
    -    index: int = betterproto.int32_field(5)
    -    num_models: int = betterproto.int32_field(6)
    -    execution_time: float = betterproto.float_field(7)
    -    num_failures: int = betterproto.int32_field(8)
    +    data: "LogTestResult" = betterproto.message_field(2)
     
     
     @dataclass
     class LogStartLine(betterproto.Message):
         """Q011"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    description: str = betterproto.string_field(2)
    +    index: int = betterproto.int32_field(3)
    +    total: int = betterproto.int32_field(4)
    +
    +
    +@dataclass
    +class LogStartLineMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    description: str = betterproto.string_field(3)
    -    index: int = betterproto.int32_field(4)
    -    total: int = betterproto.int32_field(5)
    +    data: "LogStartLine" = betterproto.message_field(2)
     
     
     @dataclass
     class LogModelResult(betterproto.Message):
         """Q012"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    description: str = betterproto.string_field(2)
    +    status: str = betterproto.string_field(3)
    +    index: int = betterproto.int32_field(4)
    +    total: int = betterproto.int32_field(5)
+    execution_time: float = betterproto.float_field(6)
    +
    +
    +@dataclass
    +class LogModelResultMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    description: str = betterproto.string_field(3)
    -    status: str = betterproto.string_field(4)
    -    index: int = betterproto.int32_field(5)
    -    total: int = betterproto.int32_field(6)
    -    execution_time: int = betterproto.int32_field(7)
    +    data: "LogModelResult" = betterproto.message_field(2)
     
     
     @dataclass
     class LogSnapshotResult(betterproto.Message):
         """Q015"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    description: str = betterproto.string_field(3)
    -    status: str = betterproto.string_field(4)
    -    index: int = betterproto.int32_field(5)
    -    total: int = betterproto.int32_field(6)
    -    execution_time: float = betterproto.float_field(7)
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    description: str = betterproto.string_field(2)
    +    status: str = betterproto.string_field(3)
    +    index: int = betterproto.int32_field(4)
    +    total: int = betterproto.int32_field(5)
    +    execution_time: float = betterproto.float_field(6)
         cfg: Dict[str, str] = betterproto.map_field(
    -        8, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    +        7, betterproto.TYPE_STRING, betterproto.TYPE_STRING
         )
     
     
    +@dataclass
    +class LogSnapshotResultMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "LogSnapshotResult" = betterproto.message_field(2)
    +
    +
     @dataclass
     class LogSeedResult(betterproto.Message):
         """Q016"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    status: str = betterproto.string_field(2)
    +    result_message: str = betterproto.string_field(3)
    +    index: int = betterproto.int32_field(4)
    +    total: int = betterproto.int32_field(5)
    +    execution_time: float = betterproto.float_field(6)
    +    schema: str = betterproto.string_field(7)
    +    relation: str = betterproto.string_field(8)
    +
    +
    +@dataclass
    +class LogSeedResultMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    status: str = betterproto.string_field(3)
    -    result_message: str = betterproto.string_field(4)
    -    index: int = betterproto.int32_field(5)
    -    total: int = betterproto.int32_field(6)
    -    execution_time: float = betterproto.float_field(7)
    -    schema: str = betterproto.string_field(8)
    -    relation: str = betterproto.string_field(9)
    +    data: "LogSeedResult" = betterproto.message_field(2)
     
     
     @dataclass
     class LogFreshnessResult(betterproto.Message):
         """Q018"""
     
    +    status: str = betterproto.string_field(1)
    +    node_info: "NodeInfo" = betterproto.message_field(2)
    +    index: int = betterproto.int32_field(3)
    +    total: int = betterproto.int32_field(4)
    +    execution_time: float = betterproto.float_field(5)
    +    source_name: str = betterproto.string_field(6)
    +    table_name: str = betterproto.string_field(7)
    +
    +
    +@dataclass
    +class LogFreshnessResultMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    status: str = betterproto.string_field(2)
    -    node_info: "NodeInfo" = betterproto.message_field(3)
    -    index: int = betterproto.int32_field(4)
    -    total: int = betterproto.int32_field(5)
    -    execution_time: float = betterproto.float_field(6)
    -    source_name: str = betterproto.string_field(7)
    -    table_name: str = betterproto.string_field(8)
    +    data: "LogFreshnessResult" = betterproto.message_field(2)
     
     
     @dataclass
     class LogCancelLine(betterproto.Message):
         """Q022"""
     
    +    conn_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class LogCancelLineMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    conn_name: str = betterproto.string_field(2)
    +    data: "LogCancelLine" = betterproto.message_field(2)
     
     
     @dataclass
     class DefaultSelector(betterproto.Message):
         """Q023"""
     
    +    name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DefaultSelectorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    name: str = betterproto.string_field(2)
    +    data: "DefaultSelector" = betterproto.message_field(2)
     
     
     @dataclass
     class NodeStart(betterproto.Message):
         """Q024"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class NodeStartMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    +    data: "NodeStart" = betterproto.message_field(2)
     
     
     @dataclass
     class NodeFinished(betterproto.Message):
         """Q025"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    run_result: "RunResultMsg" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class NodeFinishedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    run_result: "RunResultMsg" = betterproto.message_field(4)
    +    data: "NodeFinished" = betterproto.message_field(2)
     
     
     @dataclass
     class QueryCancelationUnsupported(betterproto.Message):
         """Q026"""
     
    +    type: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class QueryCancelationUnsupportedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    type: str = betterproto.string_field(2)
    +    data: "QueryCancelationUnsupported" = betterproto.message_field(2)
     
     
     @dataclass
     class ConcurrencyLine(betterproto.Message):
         """Q027"""
     
    +    num_threads: int = betterproto.int32_field(1)
    +    target_name: str = betterproto.string_field(2)
    +    node_count: int = betterproto.int32_field(3)
    +
    +
    +@dataclass
    +class ConcurrencyLineMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    num_threads: int = betterproto.int32_field(2)
    -    target_name: str = betterproto.string_field(3)
    -    node_count: int = betterproto.int32_field(4)
    +    data: "ConcurrencyLine" = betterproto.message_field(2)
     
     
     @dataclass
     class WritingInjectedSQLForNode(betterproto.Message):
         """Q029"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class WritingInjectedSQLForNodeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    +    data: "WritingInjectedSQLForNode" = betterproto.message_field(2)
     
     
     @dataclass
     class NodeCompiling(betterproto.Message):
         """Q030"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class NodeCompilingMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    +    data: "NodeCompiling" = betterproto.message_field(2)
     
     
     @dataclass
     class NodeExecuting(betterproto.Message):
         """Q031"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +
    +
    +@dataclass
    +class NodeExecutingMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    +    data: "NodeExecuting" = betterproto.message_field(2)
     
     
     @dataclass
     class LogHookStartLine(betterproto.Message):
         """Q032"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    statement: str = betterproto.string_field(2)
    +    index: int = betterproto.int32_field(3)
    +    total: int = betterproto.int32_field(4)
    +
    +
    +@dataclass
    +class LogHookStartLineMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    statement: str = betterproto.string_field(3)
    +    data: "LogHookStartLine" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class LogHookEndLine(betterproto.Message):
    +    """Q033"""
    +
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    statement: str = betterproto.string_field(2)
    +    status: str = betterproto.string_field(3)
         index: int = betterproto.int32_field(4)
         total: int = betterproto.int32_field(5)
    +    execution_time: float = betterproto.float_field(6)
    +
    +
    +@dataclass
    +class LogHookEndLineMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "LogHookEndLine" = betterproto.message_field(2)
     
     
     @dataclass
    -class LogHookEndLine(betterproto.Message):
    -    """Q033"""
    +class SkippingDetails(betterproto.Message):
    +    """Q034"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    statement: str = betterproto.string_field(3)
    -    status: str = betterproto.string_field(4)
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    resource_type: str = betterproto.string_field(2)
    +    schema: str = betterproto.string_field(3)
    +    node_name: str = betterproto.string_field(4)
         index: int = betterproto.int32_field(5)
         total: int = betterproto.int32_field(6)
    -    execution_time: float = betterproto.float_field(7)
     
     
     @dataclass
    -class SkippingDetails(betterproto.Message):
    -    """Q034"""
    -
    +class SkippingDetailsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    resource_type: str = betterproto.string_field(3)
    -    schema: str = betterproto.string_field(4)
    -    node_name: str = betterproto.string_field(5)
    -    index: int = betterproto.int32_field(6)
    -    total: int = betterproto.int32_field(7)
    +    data: "SkippingDetails" = betterproto.message_field(2)
     
     
     @dataclass
     class NothingToDo(betterproto.Message):
         """Q035"""
     
    +    pass
    +
    +
    +@dataclass
    +class NothingToDoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "NothingToDo" = betterproto.message_field(2)
     
     
     @dataclass
     class RunningOperationUncaughtError(betterproto.Message):
         """Q036"""
     
    +    exc: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RunningOperationUncaughtErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "RunningOperationUncaughtError" = betterproto.message_field(2)
     
     
     @dataclass
     class EndRunResult(betterproto.Message):
         """Q037"""
     
    +    results: List["RunResultMsg"] = betterproto.message_field(1)
    +    elapsed_time: float = betterproto.float_field(2)
    +    generated_at: datetime = betterproto.message_field(3)
    +    success: bool = betterproto.bool_field(4)
    +
    +
    +@dataclass
    +class EndRunResultMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    results: List["RunResultMsg"] = betterproto.message_field(2)
    -    elapsed_time: float = betterproto.float_field(3)
    -    generated_at: datetime = betterproto.message_field(4)
    -    success: bool = betterproto.bool_field(5)
    +    data: "EndRunResult" = betterproto.message_field(2)
     
     
     @dataclass
     class NoNodesSelected(betterproto.Message):
         """Q038"""
     
    +    pass
    +
    +
    +@dataclass
    +class NoNodesSelectedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "NoNodesSelected" = betterproto.message_field(2)
     
     
     @dataclass
     class CatchableExceptionOnRun(betterproto.Message):
         """W002"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    exc: str = betterproto.string_field(2)
    +    exc_info: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class CatchableExceptionOnRunMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    exc: str = betterproto.string_field(3)
    -    exc_info: str = betterproto.string_field(4)
    +    data: "CatchableExceptionOnRun" = betterproto.message_field(2)
     
     
     @dataclass
    -class InternalExceptionOnRun(betterproto.Message):
    +class InternalErrorOnRun(betterproto.Message):
         """W003"""
     
    +    build_path: str = betterproto.string_field(1)
    +    exc: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class InternalErrorOnRunMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    build_path: str = betterproto.string_field(2)
    -    exc: str = betterproto.string_field(3)
    +    data: "InternalErrorOnRun" = betterproto.message_field(2)
     
     
     @dataclass
     class GenericExceptionOnRun(betterproto.Message):
         """W004"""
     
    +    build_path: str = betterproto.string_field(1)
    +    unique_id: str = betterproto.string_field(2)
    +    exc: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class GenericExceptionOnRunMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    build_path: str = betterproto.string_field(2)
    -    unique_id: str = betterproto.string_field(3)
    -    exc: str = betterproto.string_field(4)
    +    data: "GenericExceptionOnRun" = betterproto.message_field(2)
     
     
     @dataclass
     class NodeConnectionReleaseError(betterproto.Message):
         """W005"""
     
    +    node_name: str = betterproto.string_field(1)
    +    exc: str = betterproto.string_field(2)
    +    exc_info: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class NodeConnectionReleaseErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_name: str = betterproto.string_field(2)
    -    exc: str = betterproto.string_field(3)
    -    exc_info: str = betterproto.string_field(4)
    +    data: "NodeConnectionReleaseError" = betterproto.message_field(2)
     
     
     @dataclass
     class FoundStats(betterproto.Message):
         """W006"""
     
    +    stat_line: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class FoundStatsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    stat_line: str = betterproto.string_field(2)
    +    data: "FoundStats" = betterproto.message_field(2)
     
     
     @dataclass
     class MainKeyboardInterrupt(betterproto.Message):
         """Z001"""
     
    +    pass
    +
    +
    +@dataclass
    +class MainKeyboardInterruptMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "MainKeyboardInterrupt" = betterproto.message_field(2)
     
     
     @dataclass
     class MainEncounteredError(betterproto.Message):
         """Z002"""
     
    +    exc: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class MainEncounteredErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc: str = betterproto.string_field(2)
    +    data: "MainEncounteredError" = betterproto.message_field(2)
     
     
     @dataclass
     class MainStackTrace(betterproto.Message):
         """Z003"""
     
    +    stack_trace: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class MainStackTraceMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    stack_trace: str = betterproto.string_field(2)
    +    data: "MainStackTrace" = betterproto.message_field(2)
     
     
     @dataclass
     class SystemErrorRetrievingModTime(betterproto.Message):
         """Z004"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class SystemErrorRetrievingModTimeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "SystemErrorRetrievingModTime" = betterproto.message_field(2)
     
     
     @dataclass
     class SystemCouldNotWrite(betterproto.Message):
         """Z005"""
     
    +    path: str = betterproto.string_field(1)
    +    reason: str = betterproto.string_field(2)
    +    exc: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class SystemCouldNotWriteMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    -    reason: str = betterproto.string_field(3)
    -    exc: str = betterproto.string_field(4)
    +    data: "SystemCouldNotWrite" = betterproto.message_field(2)
     
     
     @dataclass
     class SystemExecutingCmd(betterproto.Message):
         """Z006"""
     
    +    cmd: List[str] = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class SystemExecutingCmdMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    cmd: List[str] = betterproto.string_field(2)
    +    data: "SystemExecutingCmd" = betterproto.message_field(2)
     
     
     @dataclass
    -class SystemStdOutMsg(betterproto.Message):
    +class SystemStdOut(betterproto.Message):
         """Z007"""
     
    +    bmsg: bytes = betterproto.bytes_field(1)
    +
    +
    +@dataclass
    +class SystemStdOutMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    bmsg: bytes = betterproto.bytes_field(2)
    +    data: "SystemStdOut" = betterproto.message_field(2)
     
     
     @dataclass
    -class SystemStdErrMsg(betterproto.Message):
    +class SystemStdErr(betterproto.Message):
         """Z008"""
     
    +    bmsg: bytes = betterproto.bytes_field(1)
    +
    +
    +@dataclass
    +class SystemStdErrMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    bmsg: bytes = betterproto.bytes_field(2)
    +    data: "SystemStdErr" = betterproto.message_field(2)
     
     
     @dataclass
     class SystemReportReturnCode(betterproto.Message):
         """Z009"""
     
    +    returncode: int = betterproto.int32_field(1)
    +
    +
    +@dataclass
    +class SystemReportReturnCodeMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    returncode: int = betterproto.int32_field(2)
    +    data: "SystemReportReturnCode" = betterproto.message_field(2)
     
     
     @dataclass
     class TimingInfoCollected(betterproto.Message):
         """Z010"""
     
    +    node_info: "NodeInfo" = betterproto.message_field(1)
    +    timing_info: "TimingInfoMsg" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class TimingInfoCollectedMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    node_info: "NodeInfo" = betterproto.message_field(2)
    -    timing_info: "TimingInfoMsg" = betterproto.message_field(3)
    +    data: "TimingInfoCollected" = betterproto.message_field(2)
     
     
     @dataclass
     class LogDebugStackTrace(betterproto.Message):
         """Z011"""
     
    +    exc_info: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class LogDebugStackTraceMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc_info: str = betterproto.string_field(2)
    +    data: "LogDebugStackTrace" = betterproto.message_field(2)
     
     
     @dataclass
     class CheckCleanPath(betterproto.Message):
         """Z012"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class CheckCleanPathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "CheckCleanPath" = betterproto.message_field(2)
     
     
     @dataclass
     class ConfirmCleanPath(betterproto.Message):
         """Z013"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ConfirmCleanPathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "ConfirmCleanPath" = betterproto.message_field(2)
     
     
     @dataclass
     class ProtectedCleanPath(betterproto.Message):
         """Z014"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ProtectedCleanPathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "ProtectedCleanPath" = betterproto.message_field(2)
     
     
     @dataclass
     class FinishedCleanPaths(betterproto.Message):
         """Z015"""
     
    +    pass
    +
    +
    +@dataclass
    +class FinishedCleanPathsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "FinishedCleanPaths" = betterproto.message_field(2)
     
     
     @dataclass
     class OpenCommand(betterproto.Message):
         """Z016"""
     
    +    open_cmd: str = betterproto.string_field(1)
    +    profiles_dir: str = betterproto.string_field(2)
    +
    +
    +@dataclass
    +class OpenCommandMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    open_cmd: str = betterproto.string_field(2)
    -    profiles_dir: str = betterproto.string_field(3)
    +    data: "OpenCommand" = betterproto.message_field(2)
     
     
     @dataclass
    -class EmptyLine(betterproto.Message):
    +class Formatting(betterproto.Message):
         """Z017"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class FormattingMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "Formatting" = betterproto.message_field(2)
     
     
     @dataclass
     class RunResultWarning(betterproto.Message):
         """Z021"""
     
    +    resource_type: str = betterproto.string_field(1)
    +    node_name: str = betterproto.string_field(2)
    +    path: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class RunResultWarningMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    resource_type: str = betterproto.string_field(2)
    -    node_name: str = betterproto.string_field(3)
    -    path: str = betterproto.string_field(4)
    +    data: "RunResultWarning" = betterproto.message_field(2)
     
     
     @dataclass
     class RunResultFailure(betterproto.Message):
         """Z022"""
     
    +    resource_type: str = betterproto.string_field(1)
    +    node_name: str = betterproto.string_field(2)
    +    path: str = betterproto.string_field(3)
    +
    +
    +@dataclass
    +class RunResultFailureMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    resource_type: str = betterproto.string_field(2)
    -    node_name: str = betterproto.string_field(3)
    -    path: str = betterproto.string_field(4)
    +    data: "RunResultFailure" = betterproto.message_field(2)
     
     
     @dataclass
     class StatsLine(betterproto.Message):
         """Z023"""
     
    -    info: "EventInfo" = betterproto.message_field(1)
         stats: Dict[str, int] = betterproto.map_field(
    -        2, betterproto.TYPE_STRING, betterproto.TYPE_INT32
    +        1, betterproto.TYPE_STRING, betterproto.TYPE_INT32
         )
     
     
    +@dataclass
    +class StatsLineMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "StatsLine" = betterproto.message_field(2)
    +
    +
     @dataclass
     class RunResultError(betterproto.Message):
         """Z024"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RunResultErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "RunResultError" = betterproto.message_field(2)
     
     
     @dataclass
     class RunResultErrorNoMessage(betterproto.Message):
         """Z025"""
     
    +    status: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RunResultErrorNoMessageMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    status: str = betterproto.string_field(2)
    +    data: "RunResultErrorNoMessage" = betterproto.message_field(2)
     
     
     @dataclass
     class SQLCompiledPath(betterproto.Message):
         """Z026"""
     
    +    path: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class SQLCompiledPathMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    path: str = betterproto.string_field(2)
    +    data: "SQLCompiledPath" = betterproto.message_field(2)
     
     
     @dataclass
     class CheckNodeTestFailure(betterproto.Message):
         """Z027"""
     
    +    relation_name: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class CheckNodeTestFailureMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    relation_name: str = betterproto.string_field(2)
    +    data: "CheckNodeTestFailure" = betterproto.message_field(2)
     
     
     @dataclass
     class FirstRunResultError(betterproto.Message):
         """Z028"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class FirstRunResultErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "FirstRunResultError" = betterproto.message_field(2)
     
     
     @dataclass
     class AfterFirstRunResultError(betterproto.Message):
         """Z029"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class AfterFirstRunResultErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "AfterFirstRunResultError" = betterproto.message_field(2)
     
     
     @dataclass
     class EndOfRunSummary(betterproto.Message):
         """Z030"""
     
    +    num_errors: int = betterproto.int32_field(1)
    +    num_warnings: int = betterproto.int32_field(2)
    +    keyboard_interrupt: bool = betterproto.bool_field(3)
    +
    +
    +@dataclass
    +class EndOfRunSummaryMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    num_errors: int = betterproto.int32_field(2)
    -    num_warnings: int = betterproto.int32_field(3)
    -    keyboard_interrupt: bool = betterproto.bool_field(4)
    +    data: "EndOfRunSummary" = betterproto.message_field(2)
     
     
     @dataclass
     class LogSkipBecauseError(betterproto.Message):
         """Z034"""
     
    +    schema: str = betterproto.string_field(1)
    +    relation: str = betterproto.string_field(2)
    +    index: int = betterproto.int32_field(3)
    +    total: int = betterproto.int32_field(4)
    +
    +
    +@dataclass
    +class LogSkipBecauseErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    schema: str = betterproto.string_field(2)
    -    relation: str = betterproto.string_field(3)
    -    index: int = betterproto.int32_field(4)
    -    total: int = betterproto.int32_field(5)
    +    data: "LogSkipBecauseError" = betterproto.message_field(2)
     
     
     @dataclass
     class EnsureGitInstalled(betterproto.Message):
         """Z036"""
     
    +    pass
    +
    +
    +@dataclass
    +class EnsureGitInstalledMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "EnsureGitInstalled" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsCreatingLocalSymlink(betterproto.Message):
         """Z037"""
     
    +    pass
    +
    +
    +@dataclass
    +class DepsCreatingLocalSymlinkMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "DepsCreatingLocalSymlink" = betterproto.message_field(2)
     
     
     @dataclass
     class DepsSymlinkNotAvailable(betterproto.Message):
         """Z038"""
     
    +    pass
    +
    +
    +@dataclass
    +class DepsSymlinkNotAvailableMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "DepsSymlinkNotAvailable" = betterproto.message_field(2)
     
     
     @dataclass
     class DisableTracking(betterproto.Message):
         """Z039"""
     
    +    pass
    +
    +
    +@dataclass
    +class DisableTrackingMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "DisableTracking" = betterproto.message_field(2)
     
     
     @dataclass
     class SendingEvent(betterproto.Message):
         """Z040"""
     
    +    kwargs: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class SendingEventMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    kwargs: str = betterproto.string_field(2)
    +    data: "SendingEvent" = betterproto.message_field(2)
     
     
     @dataclass
     class SendEventFailure(betterproto.Message):
         """Z041"""
     
    +    pass
    +
    +
    +@dataclass
    +class SendEventFailureMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "SendEventFailure" = betterproto.message_field(2)
     
     
     @dataclass
     class FlushEvents(betterproto.Message):
         """Z042"""
     
    +    pass
    +
    +
    +@dataclass
    +class FlushEventsMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "FlushEvents" = betterproto.message_field(2)
     
     
     @dataclass
     class FlushEventsFailure(betterproto.Message):
         """Z043"""
     
    +    pass
    +
    +
    +@dataclass
    +class FlushEventsFailureMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    +    data: "FlushEventsFailure" = betterproto.message_field(2)
     
     
     @dataclass
     class TrackingInitializeFailure(betterproto.Message):
         """Z044"""
     
    +    exc_info: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class TrackingInitializeFailureMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    exc_info: str = betterproto.string_field(2)
    +    data: "TrackingInitializeFailure" = betterproto.message_field(2)
     
     
     @dataclass
     class RunResultWarningMessage(betterproto.Message):
         """Z046"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class RunResultWarningMessageMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "RunResultWarningMessage" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class DebugCmdOut(betterproto.Message):
    +    """Z047"""
    +
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DebugCmdOutMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "DebugCmdOut" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class DebugCmdResult(betterproto.Message):
    +    """Z048"""
    +
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class DebugCmdResultMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "DebugCmdResult" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class ListCmdOut(betterproto.Message):
    +    """Z049"""
    +
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class ListCmdOutMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "ListCmdOut" = betterproto.message_field(2)
    +
    +
    +@dataclass
    +class Note(betterproto.Message):
    +    """Z050"""
    +
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class NoteMsg(betterproto.Message):
    +    info: "EventInfo" = betterproto.message_field(1)
    +    data: "Note" = betterproto.message_field(2)
     
     
     @dataclass
     class IntegrationTestInfo(betterproto.Message):
         """T001"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class IntegrationTestInfoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "IntegrationTestInfo" = betterproto.message_field(2)
     
     
     @dataclass
     class IntegrationTestDebug(betterproto.Message):
         """T002"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class IntegrationTestDebugMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "IntegrationTestDebug" = betterproto.message_field(2)
     
     
     @dataclass
     class IntegrationTestWarn(betterproto.Message):
         """T003"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class IntegrationTestWarnMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "IntegrationTestWarn" = betterproto.message_field(2)
     
     
     @dataclass
     class IntegrationTestError(betterproto.Message):
         """T004"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class IntegrationTestErrorMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "IntegrationTestError" = betterproto.message_field(2)
     
     
     @dataclass
     class IntegrationTestException(betterproto.Message):
         """T005"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class IntegrationTestExceptionMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "IntegrationTestException" = betterproto.message_field(2)
     
     
     @dataclass
     class UnitTestInfo(betterproto.Message):
         """T006"""
     
    +    msg: str = betterproto.string_field(1)
    +
    +
    +@dataclass
    +class UnitTestInfoMsg(betterproto.Message):
         info: "EventInfo" = betterproto.message_field(1)
    -    msg: str = betterproto.string_field(2)
    +    data: "UnitTestInfo" = betterproto.message_field(2)
    diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto
    index 1c330106d92..ed57077e1da 100644
    --- a/core/dbt/events/types.proto
    +++ b/core/dbt/events/types.proto
    @@ -35,6 +35,7 @@ message NodeInfo {
         string node_status = 6;
         string node_started_at = 7;
         string node_finished_at = 8;
+    map<string, string> meta = 9;
     }
     
     // RunResult
    @@ -69,148 +70,283 @@ message GenericMessage {
     
     // A001
     message MainReportVersion {
    +    string version = 1;
    +    int32 log_version = 2;
    +}
    +
    +message MainReportVersionMsg {
         EventInfo info = 1;
    -    string version = 2;
    -    int32 log_version = 3;
    +    MainReportVersion data = 2;
     }
     
     // A002
     message MainReportArgs {
+    map<string, string> args = 1;
    +}
    +
    +message MainReportArgsMsg {
         EventInfo info = 1;
-    map<string, string> args = 2;
    +    MainReportArgs data = 2;
     }
     
     // A003
     message MainTrackingUserState {
    +    string user_state = 1;
    +}
    +
    +message MainTrackingUserStateMsg {
         EventInfo info = 1;
    -    string user_state = 2;
    +    MainTrackingUserState data = 2;
     }
     
     // A004
     message MergedFromState {
    +    int32 num_merged = 1;
    +    repeated string sample = 2;
    +}
    +
    +message MergedFromStateMsg {
         EventInfo info = 1;
    -    int32 num_merged = 2;
    -    repeated string sample = 3;
    +    MergedFromState data = 2;
     }
     
     // A005
     message MissingProfileTarget {
    +    string profile_name = 1;
    +    string target_name = 2;
    +}
    +
    +message MissingProfileTargetMsg {
         EventInfo info = 1;
    -    string profile_name = 2;
    -    string target_name = 3;
    +    MissingProfileTarget data = 2;
     }
     
     // Skipped A006, A007
     
     // A008
    -message InvalidVarsYAML {
    -    EventInfo info = 1;
    +message InvalidOptionYAML {
    +    string option_name = 1;
     }
     
    -// A009
    -message DbtProjectError {
    +message InvalidOptionYAMLMsg {
         EventInfo info = 1;
    +    InvalidOptionYAML data = 2;
     }
     
    -// A010
    -message DbtProjectErrorException {
    -    EventInfo info = 1;
    -    string exc = 2;
    -}
    -
    -// A011
    -message DbtProfileError {
    -    EventInfo info = 1;
    +// A009
    +message LogDbtProjectError {
    +    string exc = 1;
     }
     
    -// A012
    -message DbtProfileErrorException {
    +message LogDbtProjectErrorMsg {
         EventInfo info = 1;
    -    string exc = 2;
    +    LogDbtProjectError data = 2;
     }
     
    -// A013
    -message ProfileListTitle {
    -    EventInfo info = 1;
    -}
    +// Skipped A010
     
    -// A014
    -message ListSingleProfile {
    -    EventInfo info = 1;
    -    string profile = 2;
    +// A011
    +message LogDbtProfileError {
    +    string exc = 1;
    +    repeated string profiles = 2;
     }
     
    -// A015
    -message NoDefinedProfiles {
    +message LogDbtProfileErrorMsg {
         EventInfo info = 1;
    +    LogDbtProfileError data = 2;
     }
     
    -// A016
    -message ProfileHelpMessage {
    -    EventInfo info = 1;
    -}
    +// Skipped A012, A013, A014, A015, A016
     
     // A017
     message StarterProjectPath {
    +    string dir = 1;
    +}
    +
    +message StarterProjectPathMsg {
         EventInfo info = 1;
    -    string dir = 2;
    +    StarterProjectPath data = 2;
     }
     
     // A018
     message ConfigFolderDirectory {
    +    string dir = 1;
    +}
    +
    +message ConfigFolderDirectoryMsg {
         EventInfo info = 1;
    -    string dir = 2;
    +    ConfigFolderDirectory data = 2;
     }
     
     // A019
     message NoSampleProfileFound {
    +    string adapter = 1;
    +}
    +
    +message NoSampleProfileFoundMsg {
         EventInfo info = 1;
    -    string adapter = 2;
    +    NoSampleProfileFound data = 2;
     }
     
     // A020
     message ProfileWrittenWithSample {
    +    string name = 1;
    +    string path = 2;
    +}
    +
    +message ProfileWrittenWithSampleMsg {
         EventInfo info = 1;
    -    string name = 2;
    -    string path = 3;
    +    ProfileWrittenWithSample data = 2;
     }
     
     // A021
     message ProfileWrittenWithTargetTemplateYAML {
    +    string name = 1;
    +    string path = 2;
    +}
    +
    +message ProfileWrittenWithTargetTemplateYAMLMsg {
         EventInfo info = 1;
    -    string name = 2;
    -    string path = 3;
+    ProfileWrittenWithTargetTemplateYAML data = 2;
     }
     
     // A022
     message ProfileWrittenWithProjectTemplateYAML {
    +    string name = 1;
    +    string path = 2;
    +}
    +
    +message ProfileWrittenWithProjectTemplateYAMLMsg {
         EventInfo info = 1;
    -    string name = 2;
    -    string path = 3;
    +    ProfileWrittenWithProjectTemplateYAML data = 2;
     }
     
     // A023
     message SettingUpProfile {
    +}
    +
    +message SettingUpProfileMsg {
         EventInfo info = 1;
    +    SettingUpProfile data = 2;
     }
     
     // A024
     message InvalidProfileTemplateYAML {
    +}
    +
    +message InvalidProfileTemplateYAMLMsg {
         EventInfo info = 1;
    +    InvalidProfileTemplateYAML data = 2;
     }
     
     // A025
     message ProjectNameAlreadyExists {
    +    string name = 1;
    +}
    +
    +message ProjectNameAlreadyExistsMsg {
         EventInfo info = 1;
    -    string name = 2;
    +    ProjectNameAlreadyExists data = 2;
     }
     
     // A026
     message ProjectCreated {
    +    string project_name = 1;
    +    string docs_url = 2;
    +    string slack_url = 3;
    +}
    +
    +message ProjectCreatedMsg {
    +    EventInfo info = 1;
    +    ProjectCreated data = 2;
    +}
    +
    +// D - Deprecation
    +
    +// D001
    +message PackageRedirectDeprecation {
    +    string old_name = 1;
    +    string new_name = 2;
    +}
    +
    +message PackageRedirectDeprecationMsg {
    +    EventInfo info = 1;
    +    PackageRedirectDeprecation data = 2;
    +}
    +
    +// D002
    +message PackageInstallPathDeprecation {
    +}
    +
    +message PackageInstallPathDeprecationMsg {
    +    EventInfo info = 1;
    +    PackageInstallPathDeprecation data = 2;
    +}
    +
    +// D003
    +message ConfigSourcePathDeprecation {
    +    string deprecated_path = 1;
    +    string exp_path = 2;
    +}
    +
    +message ConfigSourcePathDeprecationMsg {
    +    EventInfo info = 1;
    +    ConfigSourcePathDeprecation data = 2;
    +}
    +
    +// D004
    +message ConfigDataPathDeprecation {
    +    string deprecated_path = 1;
    +    string exp_path = 2;
    +}
    +
    +message ConfigDataPathDeprecationMsg {
    +    EventInfo info = 1;
    +    ConfigDataPathDeprecation data = 2;
    +}
    +
+// D005
    +message AdapterDeprecationWarning {
    +    string old_name = 1;
    +    string new_name = 2;
    +}
    +
    +message AdapterDeprecationWarningMsg {
    +    EventInfo info = 1;
    +    AdapterDeprecationWarning data = 2;
    +}
    +
+// D006
    +message MetricAttributesRenamed {
    +    string metric_name = 1;
    +}
    +
    +message MetricAttributesRenamedMsg {
    +    EventInfo info = 1;
    +    MetricAttributesRenamed data = 2;
    +}
    +
+// D007
    +message ExposureNameDeprecation {
    +    string exposure = 1;
    +}
    +
    +message ExposureNameDeprecationMsg {
         EventInfo info = 1;
    -    string project_name = 2;
    -    string docs_url = 3;
    -    string slack_url = 4;
    +    ExposureNameDeprecation data = 2;
    +}
    +
+// D008
    +message InternalDeprecation {
    +    string name = 1;
    +    string reason = 2;
    +    string suggested_action = 3;
    +    string version = 4;
    +}
    +
    +message InternalDeprecationMsg {
    +    EventInfo info = 1;
    +    InternalDeprecation data = 2;
     }
     
     // D - Deprecation
    @@ -264,880 +400,1009 @@ message ExposureNameDeprecation {
     
     // E001
     message AdapterEventDebug {
    +    NodeInfo node_info = 1;
    +    string name = 2;
    +    string base_msg = 3;
    +    repeated string args = 4;
    +}
    +
    +message AdapterEventDebugMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string name = 3;
    -    string base_msg = 4;
    -    repeated string args = 5;
    +    AdapterEventDebug data = 2;
     }
     
     // E002
     message AdapterEventInfo {
    +    NodeInfo node_info = 1;
    +    string name = 2;
    +    string base_msg = 3;
    +    repeated string args = 4;
    +}
    +
    +message AdapterEventInfoMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string name = 3;
    -    string base_msg = 4;
    -    repeated string args = 5;
    +    AdapterEventInfo data = 2;
     }
     
     // E003
     message AdapterEventWarning {
    +    NodeInfo node_info = 1;
    +    string name = 2;
    +    string base_msg = 3;
    +    repeated string args = 4;
    +}
    +
    +message AdapterEventWarningMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string name = 3;
    -    string base_msg = 4;
    -    repeated string args = 5;
    +    AdapterEventWarning data = 2;
     }
     
     // E004
     message AdapterEventError {
    +    NodeInfo node_info = 1;
    +    string name = 2;
    +    string base_msg = 3;
    +    repeated string args = 4;
    +    string exc_info = 5;
    +}
    +
    +message AdapterEventErrorMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string name = 3;
    -    string base_msg = 4;
    -    repeated string args = 5;
    -    string exc_info = 6;
    +    AdapterEventError data = 2;
     }
     
     // E005
     message NewConnection {
    +    NodeInfo node_info = 1;
    +    string conn_type = 2;
    +    string conn_name = 3;
    +}
    +
    +message NewConnectionMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_type = 3;
    -    string conn_name = 4;
    +    NewConnection data = 2;
     }
     
     // E006
     message ConnectionReused {
    +    string conn_name = 1;
    +    string orig_conn_name = 2;
    +}
    +
    +message ConnectionReusedMsg {
         EventInfo info = 1;
    -    string conn_name = 2;
    +    ConnectionReused data = 2;
     }
     
     // E007
     message ConnectionLeftOpenInCleanup {
    +    string conn_name = 1;
    +}
    +
    +message ConnectionLeftOpenInCleanupMsg {
         EventInfo info = 1;
    -    string conn_name = 2;
+    ConnectionLeftOpenInCleanup data = 2;
     }
     
     // E008
     message ConnectionClosedInCleanup {
    +    string conn_name = 1;
    +}
    +
    +message ConnectionClosedInCleanupMsg {
         EventInfo info = 1;
    -    string conn_name = 2;
    +    ConnectionClosedInCleanup data = 2;
     }
     
     // E009
     message RollbackFailed {
    +    NodeInfo node_info = 1;
    +    string conn_name = 2;
    +    string exc_info = 3;
    +}
    +
    +message RollbackFailedMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_name = 3;
    -    string exc_info = 4;
    +    RollbackFailed data = 2;
     }
     
     // E010
     message ConnectionClosed {
    +    NodeInfo node_info = 1;
    +    string conn_name = 2;
    +}
    +
    +message ConnectionClosedMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_name = 3;
    +    ConnectionClosed data = 2;
     }
     
     // E011
     message ConnectionLeftOpen {
    +    NodeInfo node_info = 1;
    +    string conn_name = 2;
    +}
    +
    +message ConnectionLeftOpenMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_name = 3;
    +    ConnectionLeftOpen data = 2;
     }
     
     // E012
     message Rollback {
    -    EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_name = 3;
    +    NodeInfo node_info = 1;
    +    string conn_name = 2;
     }
     
    -// E013
    -message CacheMiss {
    +message RollbackMsg {
         EventInfo info = 1;
    -    string conn_name = 2;
    -    string database = 3;
    -    string schema = 4;
    +    Rollback data = 2;
     }
     
    -// E014
    -message ListRelations {
    -    EventInfo info = 1;
    +// E013
    +message CacheMiss {
    +    string conn_name = 1;
         string database = 2;
         string schema = 3;
    -    repeated ReferenceKeyMsg relations = 4;
     }
     
    -// E015
    -message ConnectionUsed {
    +message CacheMissMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_type = 3;
    -    string conn_name = 4;
    +    CacheMiss data = 2;
     }
     
    -// E016
    -message SQLQuery {
    -    EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string conn_name = 3;
    -    string sql = 4;
    +// E014
    +message ListRelations {
    +    string database = 1;
    +    string schema = 2;
    +    repeated ReferenceKeyMsg relations = 3;
     }
     
    -// E017
    -message SQLQueryStatus {
    +message ListRelationsMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string status = 3;
    -    float elapsed = 4;
    +    ListRelations data = 2;
     }
     
    -// E018
    -message SQLCommit {
    -    EventInfo info = 1;
    -    NodeInfo node_info = 2;
    +// E015
    +message ConnectionUsed {
    +    NodeInfo node_info = 1;
    +    string conn_type = 2;
         string conn_name = 3;
     }
     
    -// E019
    -message ColTypeChange {
    -    EventInfo info = 1;
    -    string orig_type = 2;
    -    string new_type = 3;
    -    ReferenceKeyMsg table = 4;
    -}
    -
    -// E020
    -message SchemaCreation {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg relation = 2;
    -}
    -
    -// E021
    -message SchemaDrop {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg relation = 2;
    -}
    -
    -// E022
    -message UncachedRelation {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg dep_key = 2;
    -    ReferenceKeyMsg ref_key = 3;
    -}
    -
    -// E023
    -message AddLink {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg dep_key = 2;
    -    ReferenceKeyMsg ref_key = 3;
    -}
    -
    -// E024
    -message AddRelation {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg relation = 2;
    -}
    -
    -// E025
    -message DropMissingRelation {
    +message ConnectionUsedMsg {
         EventInfo info = 1;
    -    ReferenceKeyMsg relation = 2;
    +    ConnectionUsed data = 2;
     }
     
    -// E026
    -message DropCascade {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg dropped = 2;
    -    repeated ReferenceKeyMsg consequences = 3;
    -}
    -
    -// E027
    -message DropRelation {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg dropped = 2;
    -}
    -
    -// E028
    -message UpdateReference {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg old_key = 2;
    -    ReferenceKeyMsg new_key = 3;
    -    ReferenceKeyMsg cached_key = 4;
    +// E016
    +message SQLQuery {
    +    NodeInfo node_info = 1;
    +    string conn_name = 2;
    +    string sql = 3;
     }
     
    -// E029
    -message TemporaryRelation {
    +message SQLQueryMsg {
         EventInfo info = 1;
    -    ReferenceKeyMsg key = 2;
    +    SQLQuery data = 2;
     }
     
    -// E030
    -message RenameSchema {
    -    EventInfo info = 1;
    -    ReferenceKeyMsg old_key = 2;
    -    ReferenceKeyMsg new_key = 3;
    +// E017
    +message SQLQueryStatus {
    +    NodeInfo node_info = 1;
    +    string status = 2;
    +    float elapsed = 3;
     }
     
    -// E031
    -message DumpBeforeAddGraph {
    +message SQLQueryStatusMsg {
         EventInfo info = 1;
-    map<string, string> dump = 2;
    +    SQLQueryStatus data = 2;
     }
     
    -// E032
    -message DumpAfterAddGraph {
    -    EventInfo info = 1;
-    map<string, string> dump = 2;
    +// E018
    +message SQLCommit {
    +    NodeInfo node_info = 1;
    +    string conn_name = 2;
     }
     
    -// E033
    -message DumpBeforeRenameSchema {
    +message SQLCommitMsg {
         EventInfo info = 1;
-    map<string, string> dump = 2;
    +    SQLCommit data = 2;
     }
     
    -// E034
    -message DumpAfterRenameSchema {
    -    EventInfo info = 1;
-    map<string, string> dump = 2;
    +// E019
    +message ColTypeChange {
    +    string orig_type = 1;
    +    string new_type = 2;
    +    ReferenceKeyMsg table = 3;
     }
     
    -// E035
    -message AdapterImportError {
    +message ColTypeChangeMsg {
         EventInfo info = 1;
    -    string exc = 2;
    +    ColTypeChange data = 2;
     }
     
    -// E036
    -message PluginLoadError {
    -    EventInfo info = 1;
    -    string exc_info = 2;
    +// E020
    +message SchemaCreation {
    +    ReferenceKeyMsg relation = 1;
     }
     
    -// E037
    -message NewConnectionOpening {
    +message SchemaCreationMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string connection_state = 3;
    +    SchemaCreation data = 2;
     }
     
    -// E038
    -message CodeExecution {
    -    EventInfo info = 1;
    -    string conn_name = 2;
    -    string code_content = 3;
    +// E021
    +message SchemaDrop {
    +    ReferenceKeyMsg relation = 1;
     }
     
    -// E039
    -message CodeExecutionStatus {
    +message SchemaDropMsg {
         EventInfo info = 1;
    -    string status = 2;
    -    float elapsed = 3;
    +    SchemaDrop data = 2;
     }
     
    -// E040
    -message CatalogGenerationError {
    -    EventInfo info = 1;
    -    string exc = 2;
    +// E022
    +message CacheAction {
    +    string action = 1;
    +    ReferenceKeyMsg ref_key = 2;
    +    ReferenceKeyMsg ref_key_2 = 3;
    +    ReferenceKeyMsg ref_key_3 = 4;
    +    repeated ReferenceKeyMsg ref_list = 5;
     }
     
    -// E041
    -message WriteCatalogFailure {
    +message CacheActionMsg {
         EventInfo info = 1;
    -    int32 num_exceptions = 2;
    +    CacheAction data = 2;
     }
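
This single CacheAction message replaces the old per-operation cache events
(AddLink, AddRelation, DropRelation, RenameSchema, and the rest of E023-E030
noted as skipped below): the action string is the discriminator, and the
generic ref_key/ref_list slots carry whichever relations the action touches.
A hedged sketch of the generated Python side — the ReferenceKeyMsg field
names are assumptions, illustrative only:

    from dbt.events.proto_types import CacheAction, ReferenceKeyMsg

    # One schema for every cache operation; unused slots keep proto defaults.
    evt = CacheAction(
        action="drop_relation",
        ref_key=ReferenceKeyMsg(
            database="analytics", schema="dbt", identifier="my_model"
        ),
    )
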
     
    -// E042
    -message CatalogWritten {
    -    EventInfo info = 1;
    -    string path = 2;
    -}
+// Skipping E023, E024, E025, E026, E027, E028, E029, E030
     
    -// E043
    -message CannotGenerateDocs {
    -    EventInfo info = 1;
    +// E031
    +message CacheDumpGraph {
+    map<string, string> dump = 1;
    +    string before_after = 2;
    +    string action = 3;
     }
     
    -// E044
    -message BuildingCatalog {
    +message CacheDumpGraphMsg {
         EventInfo info = 1;
    +    CacheDumpGraph data = 2;
     }
     
    -// E045
    -message DatabaseErrorRunningHook {
    -    EventInfo info = 1;
    -    string hook_type = 2;
    -}
     
    -// E046
    -message HooksRunning {
    -    EventInfo info = 1;
    -    int32 num_hooks = 2;
    -    string hook_type = 3;
    -}
    +// Skipping E032, E033, E034
     
    -// E047
    -message HookFinished {
    -    EventInfo info = 1;
    -    string stat_line = 2;
    -    string execution = 3;
    -    float execution_time = 4;
    +// E035
    +message AdapterImportError {
    +    string exc = 1;
     }
     
    -
    -// I - Project parsing
    -
    -// I001
    -message ParseCmdStart {
    +message AdapterImportErrorMsg {
         EventInfo info = 1;
    +    AdapterImportError data = 2;
     }
     
    -// I002
    -message ParseCmdCompiling {
    -    EventInfo info = 1;
    +// E036
    +message PluginLoadError {
    +    string exc_info = 1;
     }
     
    -// I003
    -message ParseCmdWritingManifest {
    +message PluginLoadErrorMsg {
         EventInfo info = 1;
    +    PluginLoadError data = 2;
     }
     
    -// I004
    -message ParseCmdDone {
    -    EventInfo info = 1;
    +// E037
    +message NewConnectionOpening {
    +    NodeInfo node_info = 1;
    +    string connection_state = 2;
     }
     
    -// I005
    -message ManifestDependenciesLoaded {
    +message NewConnectionOpeningMsg {
         EventInfo info = 1;
    +    NewConnectionOpening data = 2;
     }
     
    -// I006
    -message ManifestLoaderCreated {
    -    EventInfo info = 1;
    +// E038
    +message CodeExecution {
    +    string conn_name = 1;
    +    string code_content = 2;
     }
     
    -// I007
    -message ManifestLoaded {
    +message CodeExecutionMsg {
         EventInfo info = 1;
    +    CodeExecution data = 2;
     }
     
    -// I008
    -message ManifestChecked {
    -    EventInfo info = 1;
    +// E039
    +message CodeExecutionStatus {
    +    string status = 1;
    +    float elapsed = 2;
     }
     
    -// I009
    -message ManifestFlatGraphBuilt {
    +message CodeExecutionStatusMsg {
         EventInfo info = 1;
    +    CodeExecutionStatus data = 2;
     }
     
    -// I010
    -message ParseCmdPerfInfoPath {
    -    EventInfo info = 1;
    -    string path = 2;
    +// E040
    +message CatalogGenerationError {
    +    string exc = 1;
     }
     
    -// I011
    -message GenericTestFileParse {
    +message CatalogGenerationErrorMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    CatalogGenerationError data = 2;
     }
     
    -// I012
    -message MacroFileParse {
    -    EventInfo info = 1;
    -    string path = 2;
    +// E041
    +message WriteCatalogFailure {
    +    int32 num_exceptions = 1;
     }
     
    -// I013
    -message PartialParsingFullReparseBecauseOfError  {
    +message WriteCatalogFailureMsg {
         EventInfo info = 1;
    +    WriteCatalogFailure data = 2;
     }
     
    -// I014
    -message PartialParsingExceptionFile {
    -    EventInfo info = 1;
    -    string file = 2;
    +// E042
    +message CatalogWritten {
    +    string path = 1;
     }
     
    -// I015
    -message PartialParsingFile {
    +message CatalogWrittenMsg {
         EventInfo info = 1;
    -    string file_id = 2;
    +    CatalogWritten data = 2;
     }
     
    -// I016
    -message PartialParsingException {
    -    EventInfo info = 1;
-    map<string, string> exc_info = 2;
    +// E043
    +message CannotGenerateDocs {
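+    // intentionally empty: the event carries no payload beyond EventInfo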
     }
     
    -// I017
    -message PartialParsingSkipParsing {
    +message CannotGenerateDocsMsg {
         EventInfo info = 1;
    +    CannotGenerateDocs data = 2;
     }
     
    -// I018
    -message PartialParsingMacroChangeStartFullParse {
    -    EventInfo info = 1;
    +// E044
    +message BuildingCatalog {
     }
     
    -// I019
    -message PartialParsingProjectEnvVarsChanged {
    +message BuildingCatalogMsg {
         EventInfo info = 1;
    +    BuildingCatalog data = 2;
     }
     
    -// I020
    -message PartialParsingProfileEnvVarsChanged {
    -    EventInfo info = 1;
    +// E045
    +message DatabaseErrorRunningHook {
    +    string hook_type = 1;
     }
     
    -// I021
    -message PartialParsingDeletedMetric {
    +message DatabaseErrorRunningHookMsg {
         EventInfo info = 1;
    -    string unique_id = 2;
    +    DatabaseErrorRunningHook data = 2;
     }
     
    -// I022
    -message ManifestWrongMetadataVersion {
    -    EventInfo info = 1;
    -    string version = 2;
    +// E046
    +message HooksRunning {
    +    int32 num_hooks = 1;
    +    string hook_type = 2;
     }
     
    -// I023
    -message PartialParsingVersionMismatch {
    +message HooksRunningMsg {
         EventInfo info = 1;
    -    string saved_version = 2;
    -    string current_version = 3;
    +    HooksRunning data = 2;
     }
     
    -// I024
    -message PartialParsingFailedBecauseConfigChange {
    -    EventInfo info = 1;
    +// E047
    +message FinishedRunningStats {
    +    string stat_line = 1;
    +    string execution = 2;
    +    float execution_time = 3;
     }
     
    -// I025
    -message PartialParsingFailedBecauseProfileChange {
    +message FinishedRunningStatsMsg {
         EventInfo info = 1;
    +    FinishedRunningStats data = 2;
     }
     
    -// I026
    -message PartialParsingFailedBecauseNewProjectDependency {
    -    EventInfo info = 1;
    +
    +// I - Project parsing
    +
    +// I001
    +message ParseCmdOut {
    +    string msg = 1;
     }
     
    -// I027
    -message PartialParsingFailedBecauseHashChanged {
    +message ParseCmdOutMsg {
         EventInfo info = 1;
    +    ParseCmdOut data = 2;
     }
     
    -// I028
    -message PartialParsingNotEnabled {
    -    EventInfo info = 1;
    +// Skipping I002, I003, I004, I005, I006, I007, I008, I009, I010
    +
    +
    +// I011
    +message GenericTestFileParse {
    +    string path = 1;
     }
     
    -// I029
    -message ParsedFileLoadFailed {
    +message GenericTestFileParseMsg {
         EventInfo info = 1;
    -    string path = 2;
    -    string exc = 3;
    -    string exc_info = 4;
    +    GenericTestFileParse data = 2;
     }
     
    -// I030
    -message PartialParseSaveFileNotFound {
    -    EventInfo info = 1;
    +// I012
    +message MacroFileParse {
    +    string path = 1;
     }
     
    -// I031
    -message StaticParserCausedJinjaRendering {
    +message MacroFileParseMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    MacroFileParse data = 2;
     }
     
    -// I032
    -message UsingExperimentalParser {
    -    EventInfo info = 1;
    -    string path = 2;
    +// Skipping I013
    +
    +// I014
    +message PartialParsingErrorProcessingFile {
    +    string file = 1;
     }
     
    -// I033
    -message SampleFullJinjaRendering {
    +message PartialParsingErrorProcessingFileMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    PartialParsingErrorProcessingFile data = 2;
     }
     
    -// I034
    -message StaticParserFallbackJinjaRendering {
    -    EventInfo info = 1;
    -    string path = 2;
    +// I016
    +message PartialParsingError {
+    map<string, string> exc_info = 1;
     }
     
    -// I035
    -message StaticParsingMacroOverrideDetected {
    +message PartialParsingErrorMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    PartialParsingError data = 2;
     }
     
    -// I036
    -message StaticParserSuccess {
    -    EventInfo info = 1;
    -    string path = 2;
    +// I017
    +message PartialParsingSkipParsing {
     }
     
    -// I037
    -message StaticParserFailure {
    +message PartialParsingSkipParsingMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    PartialParsingSkipParsing data = 2;
     }
     
    -// I038
    -message ExperimentalParserSuccess {
    -    EventInfo info = 1;
    -    string path = 2;
    +// Skipped I018, I019, I020, I021, I022, I023
    +
    +// I024
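+// `reason` folds the former "partial parsing failed because ..." events into one.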
    +message UnableToPartialParse {
    +    string reason = 1;
     }
     
    -// I039
    -message ExperimentalParserFailure {
    +message UnableToPartialParseMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    UnableToPartialParse data = 2;
     }
     
    -// I040
    -message PartialParsingEnabled {
    -    EventInfo info = 1;
    -    int32 deleted = 2;
    -    int32 added = 3;
    -    int32 changed = 4;
    +// I025
    +message StateCheckVarsHash {
    +    string checksum = 1;
    +    string vars = 2;
    +    string profile = 3;
    +    string target = 4;
    +    string version = 5;
     }
     
    -// I041
    -message PartialParsingAddedFile {
    +message StateCheckVarsHashMsg {
         EventInfo info = 1;
    -    string file_id = 2;
    +    StateCheckVarsHash data = 2;
     }
     
    -// I042
    -message PartialParsingDeletedFile {
    -    EventInfo info = 1;
    -    string file_id = 2;
    +// Skipped I026, I027
    +
    +
    +// I028
    +message PartialParsingNotEnabled {
     }
     
    -// I043
    -message PartialParsingUpdatedFile {
    +message PartialParsingNotEnabledMsg {
         EventInfo info = 1;
    -    string file_id = 2;
    +    PartialParsingNotEnabled data = 2;
     }
     
    -// I044
    -message PartialParsingNodeMissingInSourceFile {
    -    EventInfo info = 1;
    -    string file_id=2;
    +// I029
    +message ParsedFileLoadFailed {
    +    string path = 1;
    +    string exc = 2;
    +    string exc_info = 3;
     }
     
    -// I045
    -message PartialParsingMissingNodes {
    +message ParsedFileLoadFailedMsg {
         EventInfo info = 1;
    -    string file_id = 2;
    +    ParsedFileLoadFailed data = 2;
     }
     
    -// I046
    -message PartialParsingChildMapMissingUniqueID {
    -    EventInfo info = 1;
    -    string unique_id = 2;
    +// Skipping I030 - I039
    +
    +// I040
    +message PartialParsingEnabled {
    +    int32 deleted = 1;
    +    int32 added = 2;
    +    int32 changed = 3;
     }
     
    -// I047
    -message PartialParsingUpdateSchemaFile {
    +message PartialParsingEnabledMsg {
         EventInfo info = 1;
    -    string file_id = 2;
    +    PartialParsingEnabled data = 2;
     }
     
    -// I048
    -message PartialParsingDeletedSource {
    -    EventInfo info = 1;
    -    string unique_id = 2;
    +// I041
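+// `operation` folds the former added/deleted/updated file events into one.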
    +message PartialParsingFile {
    +    string file_id = 1;
    +    string operation = 2;
     }
     
    -// I049
    -message PartialParsingDeletedExposure {
    +message PartialParsingFileMsg {
         EventInfo info = 1;
    -    string unique_id = 2;
    +    PartialParsingFile data = 2;
     }
     
    +// Skipped I042, I043, I044, I045, I046, I047, I048, I049
    +
     // I050
     message InvalidDisabledTargetInTestNode {
    +    string resource_type_title = 1;
    +    string unique_id = 2;
    +    string original_file_path = 3;
    +    string target_kind = 4;
    +    string target_name = 5;
    +    string target_package = 6;
    +}
    +
    +message InvalidDisabledTargetInTestNodeMsg {
         EventInfo info = 1;
    -    string resource_type_title = 2;
    -    string unique_id = 3;
    -    string original_file_path = 4;
    -    string target_kind = 5;
    -    string target_name = 6;
    -    string target_package = 7;
    +    InvalidDisabledTargetInTestNode data = 2;
     }
     
     // I051
     message UnusedResourceConfigPath {
    +    repeated string unused_config_paths = 1;
    +}
    +
    +message UnusedResourceConfigPathMsg {
         EventInfo info = 1;
    -    repeated string unused_config_paths = 2;
    +    UnusedResourceConfigPath data = 2;
     }
     
     // I052
     message SeedIncreased {
    +    string package_name = 1;
    +    string name = 2;
    +}
    +
    +message SeedIncreasedMsg {
         EventInfo info = 1;
    -    string package_name = 2;
    -    string name = 3;
    +    SeedIncreased data = 2;
     }
     
     // I053
     message SeedExceedsLimitSamePath {
    +    string package_name = 1;
    +    string name = 2;
    +}
    +
    +message SeedExceedsLimitSamePathMsg {
         EventInfo info = 1;
    -    string package_name = 2;
    -    string name = 3;
    +    SeedExceedsLimitSamePath data = 2;
     }
     
     // I054
     message SeedExceedsLimitAndPathChanged {
    +    string package_name = 1;
    +    string name = 2;
    +}
    +
    +message SeedExceedsLimitAndPathChangedMsg {
         EventInfo info = 1;
    -    string package_name = 2;
    -    string name = 3;
    +    SeedExceedsLimitAndPathChanged data = 2;
     }
     
     // I055
     message SeedExceedsLimitChecksumChanged {
    +    string package_name = 1;
    +    string name = 2;
    +    string checksum_name = 3;
    +}
    +
    +message SeedExceedsLimitChecksumChangedMsg {
         EventInfo info = 1;
    -    string package_name = 2;
    -    string name = 3;
    -    string checksum_name = 4;
    +    SeedExceedsLimitChecksumChanged data = 2;
     }
     
     // I056
     message UnusedTables {
    +    repeated string unused_tables = 1;
    +}
    +
    +message UnusedTablesMsg {
         EventInfo info = 1;
    -    repeated string unused_tables = 2;
    +    UnusedTables data = 2;
     }
     
     // I057
     message WrongResourceSchemaFile {
    +    string patch_name = 1;
    +    string resource_type = 2;
    +    string plural_resource_type = 3;
    +    string yaml_key = 4;
    +    string file_path = 5;
    +}
    +
    +message WrongResourceSchemaFileMsg {
         EventInfo info = 1;
    -    string patch_name = 2;
    -    string resource_type = 3;
    -    string plural_resource_type = 4;
    -    string yaml_key = 5;
    -    string file_path = 6;
    +    WrongResourceSchemaFile data = 2;
     }
     
     // I058
     message NoNodeForYamlKey {
    +    string patch_name = 1;
    +    string yaml_key = 2;
    +    string file_path = 3;
    +}
    +
    +message NoNodeForYamlKeyMsg {
         EventInfo info = 1;
    -    string patch_name = 2;
    -    string yaml_key = 3;
    -    string file_path = 4;
    +    NoNodeForYamlKey data = 2;
     }
     
     // I059
    -message MacroPatchNotFound {
    +message MacroNotFoundForPatch {
    +    string patch_name = 1;
    +}
    +
    +message MacroNotFoundForPatchMsg {
         EventInfo info = 1;
    -    string patch_name = 2;
    +    MacroNotFoundForPatch data = 2;
     }
     
     // I060
     message NodeNotFoundOrDisabled {
    +    string original_file_path = 1;
    +    string unique_id = 2;
    +    string resource_type_title = 3;
    +    string target_name = 4;
    +    string target_kind = 5;
    +    string target_package = 6;
    +    string disabled = 7;
    +}
    +
    +message NodeNotFoundOrDisabledMsg {
         EventInfo info = 1;
    -    string original_file_path = 2;
    -    string unique_id = 3;
    -    string resource_type_title = 4;
    -    string target_name = 5;
    -    string target_kind = 6;
    -    string target_package = 7;
    -    string disabled = 8;
    +    NodeNotFoundOrDisabled data = 2;
     }
     
     // I061
     message JinjaLogWarning {
    +    NodeInfo node_info = 1;
    +    string msg = 2;
    +}
    +
    +message JinjaLogWarningMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string msg = 3;
    +    JinjaLogWarning data = 2;
    +}
    +
    +// I062
    +message JinjaLogInfo {
    +    NodeInfo node_info = 1;
    +    string msg = 2;
    +}
    +
+message JinjaLogInfoMsg {
+    EventInfo info = 1;
+    JinjaLogInfo data = 2;
    +}
    +
    +// I063
    +message JinjaLogDebug {
    +    NodeInfo node_info = 1;
    +    string msg = 2;
    +}
    +
    +message JinjaLogDebugMsg {
    +    EventInfo info = 1;
    +    JinjaLogDebug data = 2;
     }
     
     // M - Deps generation
     
     // M001
     message GitSparseCheckoutSubdirectory {
    +    string subdir = 1;
    +}
    +
    +message GitSparseCheckoutSubdirectoryMsg {
         EventInfo info = 1;
    -    string subdir = 2;
    +    GitSparseCheckoutSubdirectory data = 2;
     }
     
     // M002
     message GitProgressCheckoutRevision {
    +    string revision = 1;
    +}
    +
    +message GitProgressCheckoutRevisionMsg {
         EventInfo info = 1;
    -    string revision = 2;
    +    GitProgressCheckoutRevision data = 2;
     }
     
     // M003
     message GitProgressUpdatingExistingDependency {
    +    string dir = 1;
    +}
    +
    +message GitProgressUpdatingExistingDependencyMsg {
         EventInfo info = 1;
    -    string dir = 2;
    +    GitProgressUpdatingExistingDependency data = 2;
     }
     
     // M004
     message GitProgressPullingNewDependency {
    +    string dir = 1;
    +}
    +
    +message GitProgressPullingNewDependencyMsg {
         EventInfo info = 1;
    -    string dir = 2;
    +    GitProgressPullingNewDependency data = 2;
     }
     
     // M005
     message GitNothingToDo {
    +    string sha = 1;
    +}
    +
    +message GitNothingToDoMsg {
         EventInfo info = 1;
    -    string sha = 2;
    +    GitNothingToDo data = 2;
     }
     
     // M006
     message GitProgressUpdatedCheckoutRange {
    +    string start_sha = 1;
    +    string end_sha = 2;
    +}
    +
    +message GitProgressUpdatedCheckoutRangeMsg {
         EventInfo info = 1;
    -    string start_sha = 2;
    -    string end_sha = 3;
    +    GitProgressUpdatedCheckoutRange data = 2;
     }
     
     // M007
     message GitProgressCheckedOutAt {
    +    string end_sha = 1;
    +}
    +
    +message GitProgressCheckedOutAtMsg {
         EventInfo info = 1;
    -    string end_sha = 2;
    +    GitProgressCheckedOutAt data = 2;
     }
     
     // M008
     message RegistryProgressGETRequest {
    +    string url = 1;
    +}
    +
    +message RegistryProgressGETRequestMsg {
         EventInfo info = 1;
    -    string url = 2;
    +    RegistryProgressGETRequest data = 2;
     }
     
     // M009
     message RegistryProgressGETResponse {
    +    string url = 1;
    +    int32 resp_code = 2;
    +}
    +
    +message RegistryProgressGETResponseMsg {
         EventInfo info = 1;
    -    string url = 2;
    -    int32 resp_code = 3;
    +    RegistryProgressGETResponse data = 2;
     }
     
     // M010
     message SelectorReportInvalidSelector {
    -    EventInfo info = 1;
    -    string valid_selectors = 2;
    -    string spec_method = 3;
    -    string raw_spec = 4;
    +    string valid_selectors = 1;
    +    string spec_method = 2;
    +    string raw_spec = 3;
     }
     
    -// M011
    -message JinjaLogInfo {
    +message SelectorReportInvalidSelectorMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string msg = 3;
    +    SelectorReportInvalidSelector data = 2;
     }
     
    -// M012
    -message JinjaLogDebug {
    -    EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string msg = 3;
    -}
    +// Skipped M011 and M012
     
     // M013
     message DepsNoPackagesFound {
    +}
    +
    +message DepsNoPackagesFoundMsg {
         EventInfo info = 1;
    +    DepsNoPackagesFound data = 2;
     }
     
     // M014
     message DepsStartPackageInstall {
    +    string package_name = 1;
    +}
    +
    +message DepsStartPackageInstallMsg {
         EventInfo info = 1;
    -    string package_name = 2;
    +    DepsStartPackageInstall data = 2;
     }
     
     // M015
     message DepsInstallInfo {
    +    string version_name = 1;
    +}
    +
    +message DepsInstallInfoMsg {
         EventInfo info = 1;
    -    string version_name = 2;
    +    DepsInstallInfo data = 2;
     }
     
     // M016
     message DepsUpdateAvailable {
    +    string version_latest = 1;
    +}
    +
    +message DepsUpdateAvailableMsg {
         EventInfo info = 1;
    -    string version_latest = 2;
    +    DepsUpdateAvailable data = 2;
     }
     
     // M017
     message DepsUpToDate {
    +}
    +
    +message DepsUpToDateMsg {
         EventInfo info = 1;
    +    DepsUpToDate data = 2;
     }
     
     // M018
     message DepsListSubdirectory {
    +    string subdirectory = 1;
    +}
    +
    +message DepsListSubdirectoryMsg {
         EventInfo info = 1;
    -    string subdirectory = 2;
    +    DepsListSubdirectory data = 2;
     }
     
     // M019
     message DepsNotifyUpdatesAvailable {
    +    ListOfStrings packages = 1;
    +}
    +
    +message DepsNotifyUpdatesAvailableMsg {
         EventInfo info = 1;
    -    ListOfStrings packages = 2;
    +    DepsNotifyUpdatesAvailable data = 2;
     }
     
     // M020
     message RetryExternalCall {
    +    int32 attempt = 1;
    +    int32 max = 2;
    +}
    +
    +message RetryExternalCallMsg {
         EventInfo info = 1;
    -    int32 attempt = 2;
    -    int32 max = 3;
    +    RetryExternalCall data = 2;
     }
     
     // M021
     message RecordRetryException {
    +    string exc = 1;
    +}
    +
    +message RecordRetryExceptionMsg {
         EventInfo info = 1;
    -    string exc = 2;
    +    RecordRetryException data = 2;
     }
     
     // M022
     message RegistryIndexProgressGETRequest {
    +    string url = 1;
    +}
    +
    +message RegistryIndexProgressGETRequestMsg {
         EventInfo info = 1;
    -    string url = 2;
    +    RegistryIndexProgressGETRequest data = 2;
     }
     
     // M023
     message RegistryIndexProgressGETResponse {
    +    string url = 1;
    +    int32 resp_code = 2;
    +}
    +
    +message RegistryIndexProgressGETResponseMsg {
         EventInfo info = 1;
    -    string url = 2;
    -    int32 resp_code = 3;
    +    RegistryIndexProgressGETResponse data = 2;
     }
     
     // M024
     message RegistryResponseUnexpectedType {
    +    string response = 1;
    +}
    +
    +message RegistryResponseUnexpectedTypeMsg {
         EventInfo info = 1;
    -    string response = 2;
    +    RegistryResponseUnexpectedType data = 2;
     }
     
     // M025
     message RegistryResponseMissingTopKeys {
    +    string response = 1;
    +}
    +
    +message RegistryResponseMissingTopKeysMsg {
         EventInfo info = 1;
    -    string response = 2;
    +    RegistryResponseMissingTopKeys data = 2;
     }
     
     // M026
     message RegistryResponseMissingNestedKeys {
    +    string response = 1;
    +}
    +
    +message RegistryResponseMissingNestedKeysMsg {
         EventInfo info = 1;
    -    string response = 2;
    +    RegistryResponseMissingNestedKeys data = 2;
     }
     
 // M027
     message RegistryResponseExtraNestedKeys {
    +    string response = 1;
    +}
    +
    +message RegistryResponseExtraNestedKeysMsg {
         EventInfo info = 1;
    -    string response = 2;
    +    RegistryResponseExtraNestedKeys data = 2;
     }
     
     // M028
     message DepsSetDownloadDirectory {
    +    string path = 1;
    +}
    +
    +message DepsSetDownloadDirectoryMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    DepsSetDownloadDirectory data = 2;
    +}
    +
    +// M029
    +message DepsUnpinned {
    +    string revision = 1;
    +    string git = 2;
    +}
    +
    +message DepsUnpinnedMsg {
    +    EventInfo info = 1;
    +    DepsUnpinned data = 2;
    +}
    +
    +// M030
    +message NoNodesForSelectionCriteria {
    +    string spec_raw = 1;
    +}
    +
    +message NoNodesForSelectionCriteriaMsg {
    +    EventInfo info = 1;
    +    NoNodesForSelectionCriteria data = 2;
     }
     
     // M029
    @@ -1157,49 +1422,69 @@ message NoNodesForSelectionCriteria {
     
     // Q001
     message RunningOperationCaughtError {
    +    string exc = 1;
    +}
    +
    +message RunningOperationCaughtErrorMsg {
         EventInfo info = 1;
    -    string exc = 2;
    +    RunningOperationCaughtError data = 2;
     }
     
     // Q002
     message CompileComplete {
    +}
    +
    +message CompileCompleteMsg {
         EventInfo info = 1;
    +    CompileComplete data = 2;
     }
     
     // Q003
     message FreshnessCheckComplete {
    +}
    +
    +message FreshnessCheckCompleteMsg {
         EventInfo info = 1;
    +    FreshnessCheckComplete data = 2;
     }
     
     // Q004
     message SeedHeader {
    -    EventInfo info = 1;
    -    string header = 2;
    +    string header = 1;
     }
     
    -// Q005
    -message SeedHeaderSeparator {
    +message SeedHeaderMsg {
         EventInfo info = 1;
    -    int32 len_header = 2;
    +    SeedHeader data = 2;
     }
     
    +// Skipped Q005
    +
     // Q006
     message SQLRunnerException {
    +    string exc = 1;
    +    string exc_info = 2;
    +}
    +
    +message SQLRunnerExceptionMsg {
         EventInfo info = 1;
    -    string exc = 2;
    -    string exc_info = 3;
    +    SQLRunnerException data = 2;
     }
     
     // Q007
     message LogTestResult {
    +    NodeInfo node_info = 1;
    +    string name = 2;
    +    string status = 3;
    +    int32 index = 4;
    +    int32 num_models = 5;
    +    float execution_time = 6;
    +    int32 num_failures = 7;
    +}
    +
    +message LogTestResultMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string name = 3;
    -    string status = 4;
    -    int32 index = 5;
    -    int32 num_models = 6;
    -    float execution_time = 7;
    -    int32 num_failures = 8;
    +    LogTestResult data = 2;
     }
     
     
    @@ -1208,63 +1493,83 @@ message LogTestResult {
     
     // Q011
     message LogStartLine {
    +    NodeInfo node_info = 1;
    +    string description = 2;
    +    int32 index = 3;
    +    int32 total = 4;
    +}
    +
    +message LogStartLineMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string description = 3;
    -    int32 index = 4;
    -    int32 total = 5;
    +    LogStartLine data = 2;
     }
     
     // Q012
     message LogModelResult {
    +    NodeInfo node_info = 1;
    +    string description = 2;
    +    string status = 3;
    +    int32 index = 4;
    +    int32 total = 5;
    +    int32 execution_time = 6;
    +}
    +
    +message LogModelResultMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string description = 3;
    -    string status = 4;
    -    int32 index = 5;
    -    int32 total = 6;
    -    int32 execution_time = 7;
    +    LogModelResult data = 2;
     }
     
     // skipped Q013, Q014
     
     // Q015
     message LogSnapshotResult {
    +    NodeInfo node_info = 1;
    +    string description = 2;
    +    string status = 3;
    +    int32 index = 4;
    +    int32 total = 5;
    +    float execution_time = 6;
+    map<string, string> cfg = 7;
    +}
    +
    +message LogSnapshotResultMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string description = 3;
    -    string status = 4;
    -    int32 index = 5;
    -    int32 total = 6;
    -    float execution_time = 7;
-    map<string, string> cfg = 8;
    +    LogSnapshotResult data = 2;
     }
     
     // Q016
     message LogSeedResult {
    +    NodeInfo node_info = 1;
    +    string status = 2;
    +    string result_message = 3;
    +    int32 index = 4;
    +    int32 total = 5;
    +    float execution_time = 6;
    +    string source_name = 7;
    +    string table_name = 8;
    +}
    +
    +message LogSeedResultMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string status = 3;
    -    string result_message = 4;
    -    int32 index = 5;
    -    int32 total = 6;
    -    float execution_time = 7;
    -    string schema = 8;
    -    string relation = 9;
    +    LogSeedResult data = 2;
     }
     
     // Skipped Q017
     
     // Q018
     message LogFreshnessResult {
    +    string status = 1;
    +    NodeInfo node_info = 2;
    +    int32 index = 3;
    +    int32 total = 4;
    +    float execution_time = 5;
    +    string source_name = 6;
    +    string table_name = 7;
    +}
    +
    +message LogFreshnessResultMsg {
         EventInfo info = 1;
    -    string status = 2;
    -    NodeInfo node_info = 3;
    -    int32 index = 4;
    -    int32 total = 5;
    -    float execution_time = 6;
    -    string source_name = 7;
    -    string table_name = 8;
    +    LogFreshnessResult data = 2;
     }
     
     
    @@ -1273,112 +1578,181 @@ message LogFreshnessResult {
     
     // Q022
     message LogCancelLine {
    +    string conn_name = 1;
    +}
    +
    +message LogCancelLineMsg {
         EventInfo info = 1;
    -    string conn_name = 2;
    +    LogCancelLine data = 2;
     }
     
     // Q023
     message DefaultSelector {
    +    string name = 1;
    +}
    +
    +message DefaultSelectorMsg {
         EventInfo info = 1;
    -    string name = 2;
    +    DefaultSelector data = 2;
     }
     
     // Q024
     message NodeStart {
    +    NodeInfo node_info = 1;
    +}
    +
    +message NodeStartMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    +    NodeStart data = 2;
     }
     
     // Q025
     message NodeFinished {
    +    NodeInfo node_info = 1;
    +    RunResultMsg run_result = 2;
    +}
    +
    +message NodeFinishedMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    RunResultMsg run_result = 4;
    +    NodeFinished data = 2;
     }
     
     // Q026
     message QueryCancelationUnsupported {
    +    string type = 1;
    +}
    +
    +message QueryCancelationUnsupportedMsg {
         EventInfo info = 1;
    -    string type = 2;
    +    QueryCancelationUnsupported data = 2;
     }
     
     // Q027
     message ConcurrencyLine {
    +    int32 num_threads = 1;
    +    string target_name = 2;
    +    int32 node_count = 3;
    +}
    +
    +message ConcurrencyLineMsg {
         EventInfo info = 1;
    -    int32 num_threads = 2;
    -    string target_name = 3;
    -    int32 node_count = 4;
    +    ConcurrencyLine data = 2;
     }
     
     // Skipped Q028
     
     // Q029
     message WritingInjectedSQLForNode {
    +    NodeInfo node_info = 1;
    +}
    +
    +message WritingInjectedSQLForNodeMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    +    WritingInjectedSQLForNode data = 2;
     }
     
     // Q030
     message NodeCompiling {
    +    NodeInfo node_info = 1;
    +}
    +
    +message NodeCompilingMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    +    NodeCompiling data = 2;
     }
     
     // Q031
     message NodeExecuting {
    +    NodeInfo node_info = 1;
    +}
    +
    +message NodeExecutingMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    +    NodeExecuting data = 2;
     }
     
     // Q032
     message LogHookStartLine {
    +    NodeInfo node_info = 1;
    +    string statement = 2;
    +    int32 index = 3;
    +    int32 total = 4;
    +}
    +
    +message LogHookStartLineMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string statement = 3;
    -    int32 index = 4;
    -    int32 total = 5;
    +    LogHookStartLine data = 2;
     }
     
     // Q033
     message LogHookEndLine {
    +    NodeInfo node_info = 1;
    +    string statement = 2;
    +    string status = 3;
    +    int32 index = 4;
    +    int32 total = 5;
    +    float execution_time = 6;
    +}
    +
    +message LogHookEndLineMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string statement = 3;
    -    string status = 4;
    -    int32 index = 5;
    -    int32 total = 6;
    -    float execution_time = 7;
    +    LogHookEndLine data = 2;
     }
     
     // Q034
     message SkippingDetails {
    +    NodeInfo node_info = 1;
    +    string resource_type = 2;
    +    string schema = 3;
    +    string node_name = 4;
    +    int32 index = 5;
    +    int32 total = 6;
    +}
    +
    +message SkippingDetailsMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string resource_type = 3;
    -    string schema = 4;
    -    string node_name = 5;
    -    int32 index = 6;
    -    int32 total = 7;
    +    SkippingDetails data = 2;
     }
     
     // Q035
     message NothingToDo {
    +}
    +
    +message NothingToDoMsg {
         EventInfo info = 1;
    +    NothingToDo data = 2;
     }
     
     // Q036
     message RunningOperationUncaughtError {
    +    string exc = 1;
    +}
    +
    +message RunningOperationUncaughtErrorMsg {
         EventInfo info = 1;
    -    string exc = 2;
    +    RunningOperationUncaughtError data = 2;
     }
     
     // Q037
     message EndRunResult {
    +    repeated RunResultMsg results = 1;
    +    float elapsed_time = 2;
    +    google.protobuf.Timestamp generated_at = 3;
    +    bool success = 4;
    +}
    +
    +message EndRunResultMsg {
         EventInfo info = 1;
    -    repeated RunResultMsg results = 2;
    -    float elapsed_time = 3;
    -    google.protobuf.Timestamp generated_at = 4;
    -    bool success = 5;
    +    EndRunResult data = 2;
    +}
    +
    +// Q038
    +message NoNodesSelected {
    +}
    +
    +message NoNodesSelectedMsg {
    +    EventInfo info = 1;
    +    NoNodesSelected data = 2;
     }
     
     // Q038
    @@ -1392,330 +1766,579 @@ message NoNodesSelected {
     
     // W002
     message CatchableExceptionOnRun {
    +    NodeInfo node_info = 1;
    +    string exc = 2;
    +    string exc_info = 3;
    +}
    +
    +message CatchableExceptionOnRunMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    string exc = 3;
    -    string exc_info = 4;
    +    CatchableExceptionOnRun data = 2;
     }
     
     // W003
    -message InternalExceptionOnRun {
    +message InternalErrorOnRun {
    +    string build_path = 1;
    +    string exc = 2;
    +}
    +
    +message InternalErrorOnRunMsg {
         EventInfo info = 1;
    -    string build_path = 2;
    -    string exc = 3;
    +    InternalErrorOnRun data = 2;
     }
     
     // W004
     message GenericExceptionOnRun {
    +    string build_path = 1;
    +    string unique_id = 2;
    +    string exc = 3;
    +}
    +
    +message GenericExceptionOnRunMsg {
         EventInfo info = 1;
    -    string build_path = 2;
    -    string unique_id = 3;
    -    string exc = 4;
    +    GenericExceptionOnRun data = 2;
     }
     
     // W005
     message NodeConnectionReleaseError {
    +    string node_name = 1;
    +    string exc = 2;
    +    string exc_info = 3;
    +}
    +
    +message NodeConnectionReleaseErrorMsg {
         EventInfo info = 1;
    -    string node_name = 2;
    -    string exc = 3;
    -    string exc_info = 4;
    +    NodeConnectionReleaseError data = 2;
     }
     
     // W006
     message FoundStats {
    +    string stat_line = 1;
    +}
    +
    +message FoundStatsMsg {
         EventInfo info = 1;
    -    string stat_line = 2;
    +    FoundStats data = 2;
     }
     
     // Z - Misc
     
     // Z001
     message MainKeyboardInterrupt {
    +}
    +
    +message MainKeyboardInterruptMsg {
         EventInfo info = 1;
    +    MainKeyboardInterrupt data = 2;
     }
     
     // Z002
     message MainEncounteredError {
    +    string exc = 1;
    +}
    +
    +message MainEncounteredErrorMsg {
         EventInfo info = 1;
    -    string exc = 2;
    +    MainEncounteredError data = 2;
     }
     
     // Z003
     message MainStackTrace {
    +    string stack_trace = 1;
    +}
    +
    +message MainStackTraceMsg {
         EventInfo info = 1;
    -    string stack_trace = 2;
    +    MainStackTrace data = 2;
     }
     
     // Z004
     message SystemErrorRetrievingModTime {
    +    string path = 1;
    +}
    +
    +message SystemErrorRetrievingModTimeMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    SystemErrorRetrievingModTime data = 2;
     }
     
     // Z005
     message SystemCouldNotWrite {
    +    string path = 1;
    +    string reason = 2;
    +    string exc = 3;
    +}
    +
    +message SystemCouldNotWriteMsg {
         EventInfo info = 1;
    -    string path = 2;
    -    string reason = 3;
    -    string exc = 4;
    +    SystemCouldNotWrite data = 2;
     }
     
     // Z006
     message SystemExecutingCmd {
    +    repeated string cmd = 1;
    +}
    +
    +message SystemExecutingCmdMsg {
         EventInfo info = 1;
    -    repeated string cmd = 2;
    +    SystemExecutingCmd data = 2;
     }
     
     // Z007
+message SystemStdOut {
    +    bytes bmsg = 1;
    +}
    +
     message SystemStdOutMsg {
         EventInfo info = 1;
    -    bytes bmsg = 2;
    +    SystemStdOut data = 2;
     }
     
     // Z008
    +message SystemStdErr {
    +    bytes bmsg = 1;
    +}
    +
     message SystemStdErrMsg {
         EventInfo info = 1;
    -    bytes bmsg = 2;
    +    SystemStdErr data = 2;
     }
     
     // Z009
     message SystemReportReturnCode {
    +    int32 returncode = 1;
    +}
    +
    +message SystemReportReturnCodeMsg {
         EventInfo info = 1;
    -    int32 returncode = 2;
    +    SystemReportReturnCode data = 2;
     }
     
     // Z010
     message TimingInfoCollected {
    +    NodeInfo node_info = 1;
    +    TimingInfoMsg timing_info = 2;
    +}
    +
    +message TimingInfoCollectedMsg {
         EventInfo info = 1;
    -    NodeInfo node_info = 2;
    -    TimingInfoMsg timing_info = 3;
    +    TimingInfoCollected data = 2;
     }
     
     // Z011
     message LogDebugStackTrace {
    +    string exc_info = 1;
    +}
    +
    +message LogDebugStackTraceMsg {
         EventInfo info = 1;
    -    string exc_info = 2;
    +    LogDebugStackTrace data = 2;
     }
     
     // Z012
     message CheckCleanPath {
    +    string path = 1;
    +}
    +
    +message CheckCleanPathMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    CheckCleanPath data = 2;
     }
     
     // Z013
     message ConfirmCleanPath {
    +    string path = 1;
    +}
    +
    +message ConfirmCleanPathMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    ConfirmCleanPath data = 2;
     }
     
     // Z014
     message ProtectedCleanPath {
    +    string path = 1;
    +}
    +
    +message ProtectedCleanPathMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    ProtectedCleanPath data = 2;
     }
     
     // Z015
     message FinishedCleanPaths {
    +}
    +
    +message FinishedCleanPathsMsg {
         EventInfo info = 1;
    +    FinishedCleanPaths data = 2;
     }
     
     // Z016
     message OpenCommand {
    +    string open_cmd = 1;
    +    string profiles_dir = 2;
    +}
    +
    +message OpenCommandMsg {
         EventInfo info = 1;
    -    string open_cmd = 2;
    -    string profiles_dir = 3;
    +    OpenCommand data = 2;
     }
     
     // Z017
    -message EmptyLine {
    +message Formatting {
    +    string msg = 1;
    +}
    +
    +message FormattingMsg {
         EventInfo info = 1;
    +    Formatting data = 2;
     }
     
     // Z018
     message ServingDocsPort {
    +    string address = 1;
    +    int32 port = 2;
    +}
    +
    +message ServingDocsPortMsg {
         EventInfo info = 1;
    -    string address = 2;
    -    int32 port = 3;
    +    ServingDocsPort data = 2;
     }
     
     // Z019
     message ServingDocsAccessInfo {
    +    string port = 1;
    +}
    +
    +message ServingDocsAccessInfoMsg {
         EventInfo info = 1;
    -    string port = 2;
    +    ServingDocsAccessInfo data = 2;
     }
     
     // Z020
     message ServingDocsExitInfo {
    +}
    +
    +message ServingDocsExitInfoMsg {
         EventInfo info = 1;
    +    ServingDocsExitInfo data = 2;
     }
     
     // Z021
     message RunResultWarning {
    +    string resource_type = 1;
    +    string node_name = 2;
    +    string path = 3;
    +}
    +
    +message RunResultWarningMsg {
         EventInfo info = 1;
    -    string resource_type = 2;
    -    string node_name = 3;
    -    string path = 4;
    +    RunResultWarning data = 2;
     }
     
     // Z022
     message RunResultFailure {
    +    string resource_type = 1;
    +    string node_name = 2;
    +    string path = 3;
    +}
    +
    +message RunResultFailureMsg {
         EventInfo info = 1;
    -    string resource_type = 2;
    -    string node_name = 3;
    -    string path = 4;
    +    RunResultFailure data = 2;
     }
     
     // Z023
     message StatsLine {
+    map<string, string> stats = 1;
    +}
    +
    +message StatsLineMsg {
         EventInfo info = 1;
-    map<string, string> stats = 2;
    +    StatsLine data = 2;
     }
     
     // Z024
     message RunResultError {
    +    string msg = 1;
    +}
    +
    +message RunResultErrorMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    RunResultError data = 2;
     }
     
     // Z025
     message RunResultErrorNoMessage {
    +    string status = 1;
    +}
    +
    +message RunResultErrorNoMessageMsg {
         EventInfo info = 1;
    -    string status = 2;
    +    RunResultErrorNoMessage data = 2;
     }
     
     // Z026
     message SQLCompiledPath {
    +    string path = 1;
    +}
    +
    +message SQLCompiledPathMsg {
         EventInfo info = 1;
    -    string path = 2;
    +    SQLCompiledPath data = 2;
     }
     
     // Z027
     message CheckNodeTestFailure {
    +    string relation_name = 1;
    +}
    +
    +message CheckNodeTestFailureMsg {
         EventInfo info = 1;
    -    string relation_name = 2;
    +    CheckNodeTestFailure data = 2;
     }
     
     // Z028
     message FirstRunResultError {
    +    string msg = 1;
    +}
    +
    +message FirstRunResultErrorMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    FirstRunResultError data = 2;
     }
     
     // Z029
     message AfterFirstRunResultError {
    +    string msg = 1;
    +}
    +
    +message AfterFirstRunResultErrorMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    AfterFirstRunResultError data = 2;
     }
     
     // Z030
     message EndOfRunSummary {
    +    int32 num_errors = 1;
    +    int32 num_warnings = 2;
    +    bool keyboard_interrupt = 3;
    +}
    +
    +message EndOfRunSummaryMsg {
         EventInfo info = 1;
    -    int32 num_errors = 2;
    -    int32 num_warnings = 3;
    -    bool keyboard_interrupt = 4;
    +    EndOfRunSummary data = 2;
     }
     
     // Skipped Z031, Z032, Z033
     
     // Z034
     message LogSkipBecauseError {
    +    string schema = 1;
    +    string relation = 2;
    +    int32 index = 3;
    +    int32 total = 4;
    +}
    +
    +message LogSkipBecauseErrorMsg {
         EventInfo info = 1;
    -    string schema = 2;
    -    string relation = 3;
    -    int32 index = 4;
    -    int32 total = 5;
    +    LogSkipBecauseError data = 2;
     }
     
     // Z036
     message EnsureGitInstalled {
    +}
    +
    +message EnsureGitInstalledMsg {
         EventInfo info = 1;
    +    EnsureGitInstalled data = 2;
     }
     
     // Z037
     message DepsCreatingLocalSymlink {
    +}
    +
    +message DepsCreatingLocalSymlinkMsg {
         EventInfo info = 1;
    +    DepsCreatingLocalSymlink data = 2;
     }
     
     // Z038
     message DepsSymlinkNotAvailable {
    +}
    +
    +message DepsSymlinkNotAvailableMsg {
         EventInfo info = 1;
    +    DepsSymlinkNotAvailable data = 2;
     }
     
     // Z039
     message DisableTracking {
    +}
    +
    +message DisableTrackingMsg {
         EventInfo info = 1;
    +    DisableTracking data = 2;
     }
     
     // Z040
     message SendingEvent {
    +    string kwargs = 1;
    +}
    +
    +message SendingEventMsg {
         EventInfo info = 1;
    -    string kwargs = 2;
    +    SendingEvent data = 2;
     }
     
     // Z041
     message SendEventFailure {
    +}
    +
    +message SendEventFailureMsg {
         EventInfo info = 1;
    +    SendEventFailure data = 2;
     }
     
     // Z042
     message FlushEvents {
    +}
    +
    +message FlushEventsMsg {
         EventInfo info = 1;
    +    FlushEvents data = 2;
     }
     
     // Z043
     message FlushEventsFailure {
    +}
    +
    +message FlushEventsFailureMsg {
         EventInfo info = 1;
    +    FlushEventsFailure data = 2;
     }
     
     // Z044
     message TrackingInitializeFailure {
    +    string exc_info = 1;
    +}
    +
    +message TrackingInitializeFailureMsg {
         EventInfo info = 1;
    -    string exc_info = 2;
    +    TrackingInitializeFailure data = 2;
     }
     
     // Skipped Z045
     
     // Z046
     message RunResultWarningMessage {
    +    string msg = 1;
    +}
    +
    +message RunResultWarningMessageMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    RunResultWarningMessage data = 2;
    +}
    +
    +// Z047
    +message DebugCmdOut {
    +    string msg = 1;
    +}
    +
    +message DebugCmdOutMsg {
    +    EventInfo info = 1;
    +    DebugCmdOut data = 2;
    +}
    +
    +// Z048
    +message DebugCmdResult {
    +    string msg = 1;
    +}
    +
    +message DebugCmdResultMsg {
    +    EventInfo info = 1;
    +    DebugCmdResult data = 2;
    +}
    +
    +// Z049
    +message ListCmdOut {
    +    string msg = 1;
    +}
    +
    +message ListCmdOutMsg {
    +    EventInfo info = 1;
    +    ListCmdOut data = 2;
    +}
    +
    +// Z050
    +message Note {
    +    string msg = 1;
    +}
    +
    +message NoteMsg {
    +    EventInfo info = 1;
    +    Note data = 2;
     }
     
     // T - Integration tests
     
     // T001
     message IntegrationTestInfo {
    +    string msg = 1;
    +}
    +
    +message IntegrationTestInfoMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    IntegrationTestInfo data = 2;
     }
     
     // T002
     message IntegrationTestDebug {
    +    string msg = 1;
    +}
    +
    +message IntegrationTestDebugMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    IntegrationTestDebug data = 2;
     }
     
     // T003
     message IntegrationTestWarn {
    +    string msg = 1;
    +}
    +
    +message IntegrationTestWarnMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    IntegrationTestWarn data = 2;
     }
     
     // T004
     message IntegrationTestError {
    +    string msg = 1;
    +}
    +
    +message IntegrationTestErrorMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    IntegrationTestError data = 2;
     }
     
     // T005
     message IntegrationTestException {
    +    string msg = 1;
    +}
    +
    +message IntegrationTestExceptionMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    IntegrationTestException data = 2;
     }
     
     // T006
     message UnitTestInfo {
    +    string msg = 1;
    +}
    +
    +message UnitTestInfoMsg {
         EventInfo info = 1;
    -    string msg = 2;
    +    UnitTestInfo data = 2;
     }
    diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py
    index f56aaf51e4e..cdd48dfacd1 100644
    --- a/core/dbt/events/types.py
    +++ b/core/dbt/events/types.py
    @@ -11,6 +11,7 @@
         Cache,
         AdapterEventStringFunctor,
         EventStringFunctor,
    +    EventLevel,
     )
     from dbt.events.format import format_fancy_output_line, pluralize
     
    @@ -107,88 +108,49 @@ def message(self) -> str:
     
     
     @dataclass
    -class InvalidVarsYAML(ErrorLevel, pt.InvalidVarsYAML):
    +class InvalidOptionYAML(ErrorLevel, pt.InvalidOptionYAML):
         def code(self):
             return "A008"
     
         def message(self) -> str:
    -        return "The YAML provided in the --vars argument is not valid."
    +        return f"The YAML provided in the --{self.option_name} argument is not valid."
     
     
     @dataclass
    -class DbtProjectError(ErrorLevel, pt.DbtProjectError):
    +class LogDbtProjectError(ErrorLevel, pt.LogDbtProjectError):
         def code(self):
             return "A009"
     
         def message(self) -> str:
    -        return "Encountered an error while reading the project:"
    +        msg = "Encountered an error while reading the project:"
    +        if self.exc:
    +            msg += f"  ERROR: {str(self.exc)}"
    +        return msg
     
     
    -@dataclass
    -class DbtProjectErrorException(ErrorLevel, pt.DbtProjectErrorException):
    -    def code(self):
    -        return "A010"
    -
    -    def message(self) -> str:
    -        return f"  ERROR: {str(self.exc)}"
    +# Skipped A010
     
     
     @dataclass
    -class DbtProfileError(ErrorLevel, pt.DbtProfileError):
    +class LogDbtProfileError(ErrorLevel, pt.LogDbtProfileError):
         def code(self):
             return "A011"
     
         def message(self) -> str:
    -        return "Encountered an error while reading profiles:"
    -
    -
    -@dataclass
    -class DbtProfileErrorException(ErrorLevel, pt.DbtProfileErrorException):
    -    def code(self):
    -        return "A012"
    -
    -    def message(self) -> str:
    -        return f"  ERROR: {str(self.exc)}"
    -
    -
    -@dataclass
    -class ProfileListTitle(InfoLevel, pt.ProfileListTitle):
    -    def code(self):
    -        return "A013"
    -
    -    def message(self) -> str:
    -        return "Defined profiles:"
    -
    -
    -@dataclass
    -class ListSingleProfile(InfoLevel, pt.ListSingleProfile):
    -    def code(self):
    -        return "A014"
    -
    -    def message(self) -> str:
    -        return f" - {self.profile}"
    -
    -
    -@dataclass
    -class NoDefinedProfiles(InfoLevel, pt.NoDefinedProfiles):
    -    def code(self):
    -        return "A015"
    -
    -    def message(self) -> str:
    -        return "There are no profiles defined in your profiles.yml file"
    -
    -
    -@dataclass
    -class ProfileHelpMessage(InfoLevel, pt.ProfileHelpMessage):
    -    def code(self):
    -        return "A016"
    +        msg = "Encountered an error while reading profiles:\n" f"  ERROR: {str(self.exc)}"
    +        if self.profiles:
    +            msg += "Defined profiles:\n"
    +            for profile in self.profiles:
    +                msg += f" - {profile}"
    +        else:
    +            msg += "There are no profiles defined in your profiles.yml file"
     
    -    def message(self) -> str:
    -        return """
    +        msg += """
     For more information on configuring profiles, please consult the dbt docs:
     
     https://docs.getdbt.com/docs/configure-your-profile
     """
    +        return msg
     
     
     @dataclass
    @@ -290,7 +252,7 @@ def code(self):
             return "A026"
     
         def message(self) -> str:
    -        return """
    +        return f"""
     Your new dbt project "{self.project_name}" was created!
     
     For more information on how to configure the profiles.yml file,
    @@ -392,7 +354,6 @@ def message(self):
                 "\n  'sql'              -> 'expression'"
                 "\n  'type'             -> 'calculation_method'"
                 "\n  'type: expression' -> 'calculation_method: derived'"
    -            "\nThe old metric parameter names will be fully deprecated in v1.4."
                 f"\nPlease remove them from the metric definition of metric '{self.metric_name}'"
                 "\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849"
             )
    @@ -416,6 +377,22 @@ def message(self):
             return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
     
     
    +@dataclass
    +class InternalDeprecation(WarnLevel, pt.InternalDeprecation):
    +    def code(self):
    +        return "D008"
    +
    +    def message(self):
    +        extra_reason = ""
    +        if self.reason:
    +            extra_reason = f"\n{self.reason}"
    +        msg = (
    +            f"`{self.name}` is deprecated and will be removed in dbt-core version {self.version}\n\n"
    +            f"Adapter maintainers can resolve this deprecation by {self.suggested_action}. {extra_reason}"
    +        )
    +        return warning_tag(msg)
    +
    +
     # =======================================================
     # E - DB Adapter
     # =======================================================
    @@ -463,7 +440,7 @@ def code(self):
             return "E005"
     
         def message(self) -> str:
    -        return f'Acquiring new {self.conn_type} connection "{self.conn_name}"'
    +        return f"Acquiring new {self.conn_type} connection '{self.conn_name}'"
     
     
     @dataclass
    @@ -472,7 +449,7 @@ def code(self):
             return "E006"
     
         def message(self) -> str:
    -        return f"Re-using an available connection from the pool (formerly {self.conn_name})"
    +        return f"Re-using an available connection from the pool (formerly {self.orig_conn_name}, now {self.conn_name})"
     
     
     @dataclass
    @@ -539,7 +516,7 @@ def code(self):
         def message(self) -> str:
             return (
                 f'On "{self.conn_name}": cache miss for schema '
    -            '"{self.database}.{self.schema}", this is inefficient'
    +            f'"{self.database}.{self.schema}", this is inefficient'
             )
     
     
    @@ -615,130 +592,54 @@ def message(self) -> str:
             return f'Dropping schema "{self.relation}".'
     
     
    -# TODO pretty sure this is only ever called in dead code
    -# see: core/dbt/adapters/cache.py _add_link vs add_link
     @dataclass
    -class UncachedRelation(DebugLevel, Cache, pt.UncachedRelation):
    +class CacheAction(DebugLevel, Cache, pt.CacheAction):
         def code(self):
             return "E022"
     
    -    def message(self) -> str:
    -        return (
    -            f"{self.dep_key} references {str(self.ref_key)} "
    -            "but {self.ref_key.database}.{self.ref_key.schema}"
    -            "is not in the cache, skipping assumed external relation"
    -        )
    -
    -
    -@dataclass
    -class AddLink(DebugLevel, Cache, pt.AddLink):
    -    def code(self):
    -        return "E023"
    -
    -    def message(self) -> str:
    -        return f"adding link, {self.dep_key} references {self.ref_key}"
    -
    -
    -@dataclass
    -class AddRelation(DebugLevel, Cache, pt.AddRelation):
    -    def code(self):
    -        return "E024"
    -
    -    def message(self) -> str:
    -        return f"Adding relation: {str(self.relation)}"
    -
    -
    -@dataclass
    -class DropMissingRelation(DebugLevel, Cache, pt.DropMissingRelation):
    -    def code(self):
    -        return "E025"
    -
    -    def message(self) -> str:
    -        return f"dropped a nonexistent relationship: {str(self.relation)}"
    -
    -
    -@dataclass
    -class DropCascade(DebugLevel, Cache, pt.DropCascade):
    -    def code(self):
    -        return "E026"
    -
    -    def message(self) -> str:
    -        return f"drop {self.dropped} is cascading to {self.consequences}"
    -
    -
    -@dataclass
    -class DropRelation(DebugLevel, Cache, pt.DropRelation):
    -    def code(self):
    -        return "E027"
    -
    -    def message(self) -> str:
    -        return f"Dropping relation: {self.dropped}"
    -
    -
    -@dataclass
    -class UpdateReference(DebugLevel, Cache, pt.UpdateReference):
    -    def code(self):
    -        return "E028"
    -
    -    def message(self) -> str:
    -        return (
    -            f"updated reference from {self.old_key} -> {self.cached_key} to "
    -            "{self.new_key} -> {self.cached_key}"
    -        )
    -
    -
    -@dataclass
    -class TemporaryRelation(DebugLevel, Cache, pt.TemporaryRelation):
    -    def code(self):
    -        return "E029"
    -
    -    def message(self) -> str:
    -        return f"old key {self.key} not found in self.relations, assuming temporary"
    -
    +    def message(self):
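+        # One event replaces the per-action cache events E022-E030; `action`
+        # selects which of the old message texts to render.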
    +        if self.action == "add_link":
    +            return f"adding link, {self.ref_key} references {self.ref_key_2}"
    +        elif self.action == "add_relation":
    +            return f"adding relation: {str(self.ref_key)}"
    +        elif self.action == "drop_missing_relation":
    +            return f"dropped a nonexistent relationship: {str(self.ref_key)}"
    +        elif self.action == "drop_cascade":
    +            return f"drop {self.ref_key} is cascading to {self.ref_list}"
    +        elif self.action == "drop_relation":
    +            return f"Dropping relation: {self.ref_key}"
    +        elif self.action == "update_reference":
    +            return (
    +                f"updated reference from {self.ref_key} -> {self.ref_key_3} to "
    +                f"{self.ref_key_2} -> {self.ref_key_3}"
    +            )
    +        elif self.action == "temporary_relation":
    +            return f"old key {self.ref_key} not found in self.relations, assuming temporary"
    +        elif self.action == "rename_relation":
    +            return f"Renaming relation {self.ref_key} to {self.ref_key_2}"
    +        elif self.action == "uncached_relation":
    +            return (
    +                f"{self.ref_key_2} references {str(self.ref_key)} "
+                f"but {self.ref_key.database}.{self.ref_key.schema} "
+                "is not in the cache, skipping assumed external relation"
    +            )
    +        else:
    +            return f"{self.ref_key}"
     
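
# Illustration (editor's sketch, not part of this patch): the consolidation
# above replaces a family of near-identical cache events with one event
# carrying an `action` discriminator. A self-contained stand-in (the class
# and its string-typed fields are illustrative, not dbt's proto-backed
# CacheAction):
from dataclasses import dataclass

@dataclass
class CacheActionSketch:
    action: str
    ref_key: str = ""
    ref_key_2: str = ""

    def message(self) -> str:
        if self.action == "add_link":
            return f"adding link, {self.ref_key} references {self.ref_key_2}"
        return f"{self.ref_key}"

assert CacheActionSketch("add_link", "a", "b").message() == "adding link, a references b"
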
    -@dataclass
    -class RenameSchema(DebugLevel, Cache, pt.RenameSchema):
    -    def code(self):
    -        return "E030"
     
    -    def message(self) -> str:
    -        return f"Renaming relation {self.old_key} to {self.new_key}"
    +# Skipping E023, E024, E025, E026, E027, E028, E029, E030
     
     
     @dataclass
    -class DumpBeforeAddGraph(DebugLevel, Cache, pt.DumpBeforeAddGraph):
    +class CacheDumpGraph(DebugLevel, Cache, pt.CacheDumpGraph):
         def code(self):
             return "E031"
     
         def message(self) -> str:
    -        return f"before adding : {self.dump}"
    -
    -
    -@dataclass
    -class DumpAfterAddGraph(DebugLevel, Cache, pt.DumpAfterAddGraph):
    -    def code(self):
    -        return "E032"
    -
    -    def message(self) -> str:
    -        return f"after adding: {self.dump}"
    +        return f"{self.before_after} {self.action} : {self.dump}"
     
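
# Illustration (editor's sketch, not part of this patch): the four
# Dump{Before,After}AddGraph/RenameSchema events (E031-E034) collapse into
# this single parameterized message; a minimal stand-in of the format:
def cache_dump_msg(before_after: str, action: str, dump: str) -> str:
    return f"{before_after} {action} : {dump}"

assert cache_dump_msg("before", "adding", "{}") == "before adding : {}"
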
     
    -@dataclass
    -class DumpBeforeRenameSchema(DebugLevel, Cache, pt.DumpBeforeRenameSchema):
    -    def code(self):
    -        return "E033"
    -
    -    def message(self) -> str:
    -        return f"before rename: {self.dump}"
    -
    -
    -@dataclass
    -class DumpAfterRenameSchema(DebugLevel, Cache, pt.DumpAfterRenameSchema):
    -    def code(self):
    -        return "E034"
    -
    -    def message(self) -> str:
    -        return f"after rename: {self.dump}"
    +# Skipping E032, E033, E034
     
     
     @dataclass
    @@ -756,7 +657,7 @@ def code(self):
             return "E036"
     
         def message(self):
    -        pass
    +        return f"{self.exc_info}"
     
     
     @dataclass
    @@ -854,7 +755,7 @@ def message(self) -> str:
     
     
     @dataclass
    -class HookFinished(InfoLevel, pt.HookFinished):
    +class FinishedRunningStats(InfoLevel, pt.FinishedRunningStats):
         def code(self):
             return "E047"
     
    @@ -868,84 +769,15 @@ def message(self) -> str:
     
     
     @dataclass
    -class ParseCmdStart(InfoLevel, pt.ParseCmdStart):
    +class ParseCmdOut(InfoLevel, pt.ParseCmdOut):
         def code(self):
             return "I001"
     
         def message(self) -> str:
    -        return "Start parsing."
    -
    -
    -@dataclass
    -class ParseCmdCompiling(InfoLevel, pt.ParseCmdCompiling):
    -    def code(self):
    -        return "I002"
    -
    -    def message(self) -> str:
    -        return "Compiling."
    -
    -
    -@dataclass
    -class ParseCmdWritingManifest(InfoLevel, pt.ParseCmdWritingManifest):
    -    def code(self):
    -        return "I003"
    -
    -    def message(self) -> str:
    -        return "Writing manifest."
    -
    -
    -@dataclass
    -class ParseCmdDone(InfoLevel, pt.ParseCmdDone):
    -    def code(self):
    -        return "I004"
    -
    -    def message(self) -> str:
    -        return "Done."
    -
    -
    -@dataclass
    -class ManifestDependenciesLoaded(InfoLevel, pt.ManifestDependenciesLoaded):
    -    def code(self):
    -        return "I005"
    -
    -    def message(self) -> str:
    -        return "Dependencies loaded"
    -
    -
    -@dataclass
    -class ManifestLoaderCreated(InfoLevel, pt.ManifestLoaderCreated):
    -    def code(self):
    -        return "I006"
    -
    -    def message(self) -> str:
    -        return "ManifestLoader created"
    -
    -
    -@dataclass
    -class ManifestLoaded(InfoLevel, pt.ManifestLoaded):
    -    def code(self):
    -        return "I007"
    -
    -    def message(self) -> str:
    -        return "Manifest loaded"
    -
    -
    -@dataclass
    -class ManifestChecked(InfoLevel, pt.ManifestChecked):
    -    def code(self):
    -        return "I008"
    -
    -    def message(self) -> str:
    -        return "Manifest checked"
    +        return self.msg
     
     
    -@dataclass
    -class ManifestFlatGraphBuilt(InfoLevel, pt.ManifestFlatGraphBuilt):
    -    def code(self):
    -        return "I009"
    -
    -    def message(self) -> str:
    -        return "Flat graph built"
    +# Skipping I002, I003, I004, I005, I006, I007, I008, I009
     
     
     @dataclass
    @@ -975,19 +807,11 @@ def message(self) -> str:
             return f"Parsing {self.path}"
     
     
    -@dataclass
    -class PartialParsingFullReparseBecauseOfError(
    -    InfoLevel, pt.PartialParsingFullReparseBecauseOfError
    -):
    -    def code(self):
    -        return "I013"
    -
    -    def message(self) -> str:
    -        return "Partial parsing enabled but an error occurred. Switching to a full re-parse."
    +# Skipping I013
     
     
     @dataclass
    -class PartialParsingExceptionFile(DebugLevel, pt.PartialParsingExceptionFile):
    +class PartialParsingErrorProcessingFile(DebugLevel, pt.PartialParsingErrorProcessingFile):
         def code(self):
             return "I014"
     
    @@ -995,17 +819,11 @@ def message(self) -> str:
             return f"Partial parsing exception processing file {self.file}"
     
     
    -@dataclass
    -class PartialParsingFile(DebugLevel, pt.PartialParsingFile):
    -    def code(self):
    -        return "I015"
    -
    -    def message(self) -> str:
    -        return f"PP file: {self.file_id}"
    +# Skipped I015
     
     
     @dataclass
    -class PartialParsingException(DebugLevel, pt.PartialParsingException):
    +class PartialParsingError(DebugLevel, pt.PartialParsingError):
         def code(self):
             return "I016"
     
    @@ -1022,112 +840,28 @@ def message(self) -> str:
             return "Partial parsing enabled, no changes found, skipping parsing"
     
     
    -@dataclass
    -class PartialParsingMacroChangeStartFullParse(
    -    InfoLevel, pt.PartialParsingMacroChangeStartFullParse
    -):
    -    def code(self):
    -        return "I018"
    -
    -    def message(self) -> str:
    -        return "Change detected to override macro used during parsing. Starting full parse."
    -
    -
    -@dataclass
    -class PartialParsingProjectEnvVarsChanged(InfoLevel, pt.PartialParsingProjectEnvVarsChanged):
    -    def code(self):
    -        return "I019"
    -
    -    def message(self) -> str:
    -        return "Unable to do partial parsing because env vars used in dbt_project.yml have changed"
    -
    -
    -@dataclass
    -class PartialParsingProfileEnvVarsChanged(InfoLevel, pt.PartialParsingProfileEnvVarsChanged):
    -    def code(self):
    -        return "I020"
    -
    -    def message(self) -> str:
    -        return "Unable to do partial parsing because env vars used in profiles.yml have changed"
    -
    -
    -@dataclass
    -class PartialParsingDeletedMetric(DebugLevel, pt.PartialParsingDeletedMetric):
    -    def code(self):
    -        return "I021"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: deleted metric {self.unique_id}"
    -
    -
    -@dataclass
    -class ManifestWrongMetadataVersion(DebugLevel, pt.ManifestWrongMetadataVersion):
    -    def code(self):
    -        return "I022"
    -
    -    def message(self) -> str:
    -        return (
    -            "Manifest metadata did not contain correct version. "
    -            f"Contained '{self.version}' instead."
    -        )
    -
    -
    -@dataclass
    -class PartialParsingVersionMismatch(InfoLevel, pt.PartialParsingVersionMismatch):
    -    def code(self):
    -        return "I023"
    -
    -    def message(self) -> str:
    -        return (
    -            "Unable to do partial parsing because of a dbt version mismatch. "
    -            f"Saved manifest version: {self.saved_version}. "
    -            f"Current version: {self.current_version}."
    -        )
    +# Skipped I018, I019, I020, I021, I022, I023
     
     
     @dataclass
    -class PartialParsingFailedBecauseConfigChange(
    -    InfoLevel, pt.PartialParsingFailedBecauseConfigChange
    -):
    +class UnableToPartialParse(InfoLevel, pt.UnableToPartialParse):
         def code(self):
             return "I024"
     
         def message(self) -> str:
    -        return (
    -            "Unable to do partial parsing because config vars, "
    -            "config profile, or config target have changed"
    -        )
    +        return f"Unable to do partial parsing because {self.reason}"
     
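
# Illustration (editor's sketch, not part of this patch): the fixed-message
# events this hunk removes become one event parameterized by `reason`; the
# reasons below are taken from the removed messages:
def unable_to_partial_parse_msg(reason: str) -> str:
    return f"Unable to do partial parsing because {reason}"

assert (
    unable_to_partial_parse_msg("profile has changed")
    == "Unable to do partial parsing because profile has changed"
)
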
     
     @dataclass
    -class PartialParsingFailedBecauseProfileChange(
    -    InfoLevel, pt.PartialParsingFailedBecauseProfileChange
    -):
    +class StateCheckVarsHash(DebugLevel, pt.StateCheckVarsHash):
         def code(self):
             return "I025"
     
         def message(self) -> str:
    -        return "Unable to do partial parsing because profile has changed"
    -
    -
    -@dataclass
    -class PartialParsingFailedBecauseNewProjectDependency(
    -    InfoLevel, pt.PartialParsingFailedBecauseNewProjectDependency
    -):
    -    def code(self):
    -        return "I026"
    -
    -    def message(self) -> str:
    -        return "Unable to do partial parsing because a project dependency has been added"
+        return (
+            f"checksum: {self.checksum}, vars: {self.vars}, "
+            f"profile: {self.profile}, target: {self.target}, "
+            f"version: {self.version}"
+        )
     
     
    -@dataclass
    -class PartialParsingFailedBecauseHashChanged(InfoLevel, pt.PartialParsingFailedBecauseHashChanged):
    -    def code(self):
    -        return "I027"
    -
    -    def message(self) -> str:
    -        return "Unable to do partial parsing because a project config has changed"
+# Skipped I026, I027
     
     
     @dataclass
    @@ -1148,96 +882,7 @@ def message(self) -> str:
             return f"Failed to load parsed file from disk at {self.path}: {self.exc}"
     
     
    -@dataclass
    -class PartialParseSaveFileNotFound(InfoLevel, pt.PartialParseSaveFileNotFound):
    -    def code(self):
    -        return "I030"
    -
    -    def message(self) -> str:
    -        return "Partial parse save file not found. Starting full parse."
    -
    -
    -@dataclass
    -class StaticParserCausedJinjaRendering(DebugLevel, pt.StaticParserCausedJinjaRendering):
    -    def code(self):
    -        return "I031"
    -
    -    def message(self) -> str:
    -        return f"1605: jinja rendering because of STATIC_PARSER flag. file: {self.path}"
    -
    -
    -# TODO: Experimental/static parser uses these for testing and some may be a good use case for
    -#       the `TestLevel` logger once we implement it.  Some will probably stay `DebugLevel`.
    -@dataclass
    -class UsingExperimentalParser(DebugLevel, pt.UsingExperimentalParser):
    -    def code(self):
    -        return "I032"
    -
    -    def message(self) -> str:
    -        return f"1610: conducting experimental parser sample on {self.path}"
    -
    -
    -@dataclass
    -class SampleFullJinjaRendering(DebugLevel, pt.SampleFullJinjaRendering):
    -    def code(self):
    -        return "I033"
    -
    -    def message(self) -> str:
    -        return f"1611: conducting full jinja rendering sample on {self.path}"
    -
    -
    -@dataclass
    -class StaticParserFallbackJinjaRendering(DebugLevel, pt.StaticParserFallbackJinjaRendering):
    -    def code(self):
    -        return "I034"
    -
    -    def message(self) -> str:
    -        return f"1602: parser fallback to jinja rendering on {self.path}"
    -
    -
    -@dataclass
    -class StaticParsingMacroOverrideDetected(DebugLevel, pt.StaticParsingMacroOverrideDetected):
    -    def code(self):
    -        return "I035"
    -
    -    def message(self) -> str:
    -        return f"1601: detected macro override of ref/source/config in the scope of {self.path}"
    -
    -
    -@dataclass
    -class StaticParserSuccess(DebugLevel, pt.StaticParserSuccess):
    -    def code(self):
    -        return "I036"
    -
    -    def message(self) -> str:
    -        return f"1699: static parser successfully parsed {self.path}"
    -
    -
    -@dataclass
    -class StaticParserFailure(DebugLevel, pt.StaticParserFailure):
    -    def code(self):
    -        return "I037"
    -
    -    def message(self) -> str:
    -        return f"1603: static parser failed on {self.path}"
    -
    -
    -@dataclass
    -class ExperimentalParserSuccess(DebugLevel, pt.ExperimentalParserSuccess):
    -    def code(self):
    -        return "I038"
    -
    -    def message(self) -> str:
    -        return f"1698: experimental parser successfully parsed {self.path}"
    -
    -
    -@dataclass
    -class ExperimentalParserFailure(DebugLevel, pt.ExperimentalParserFailure):
    -    def code(self):
    -        return "I039"
    -
    -    def message(self) -> str:
    -        return f"1604: experimental parser failed on {self.path}"
    +# Skipped I030-I039
     
     
     @dataclass
    @@ -1255,104 +900,32 @@ def message(self) -> str:
     
     
     @dataclass
    -class PartialParsingAddedFile(DebugLevel, pt.PartialParsingAddedFile):
    +class PartialParsingFile(DebugLevel, pt.PartialParsingFile):
         def code(self):
             return "I041"
     
         def message(self) -> str:
    -        return f"Partial parsing: added file: {self.file_id}"
    -
    -
    -@dataclass
    -class PartialParsingDeletedFile(DebugLevel, pt.PartialParsingDeletedFile):
    -    def code(self):
    -        return "I042"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: deleted file: {self.file_id}"
    -
    -
    -@dataclass
    -class PartialParsingUpdatedFile(DebugLevel, pt.PartialParsingUpdatedFile):
    -    def code(self):
    -        return "I043"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: updated file: {self.file_id}"
    -
    -
    -@dataclass
    -class PartialParsingNodeMissingInSourceFile(DebugLevel, pt.PartialParsingNodeMissingInSourceFile):
    -    def code(self):
    -        return "I044"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: nodes list not found in source_file {self.file_id}"
    -
    -
    -@dataclass
    -class PartialParsingMissingNodes(DebugLevel, pt.PartialParsingMissingNodes):
    -    def code(self):
    -        return "I045"
    -
    -    def message(self) -> str:
    -        return f"No nodes found for source file {self.file_id}"
    -
    -
    -@dataclass
    -class PartialParsingChildMapMissingUniqueID(DebugLevel, pt.PartialParsingChildMapMissingUniqueID):
    -    def code(self):
    -        return "I046"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: {self.unique_id} not found in child_map"
    -
    -
    -@dataclass
    -class PartialParsingUpdateSchemaFile(DebugLevel, pt.PartialParsingUpdateSchemaFile):
    -    def code(self):
    -        return "I047"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: update schema file: {self.file_id}"
    -
    -
    -@dataclass
    -class PartialParsingDeletedSource(DebugLevel, pt.PartialParsingDeletedSource):
    -    def code(self):
    -        return "I048"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: deleted source {self.unique_id}"
    +        return f"Partial parsing: {self.operation} file: {self.file_id}"
     
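
# Illustration (editor's sketch, not part of this patch): the added/deleted/
# updated file events (I041-I043) now share one message parameterized by
# `operation`; the file_id value is invented:
def partial_parsing_file_msg(operation: str, file_id: str) -> str:
    return f"Partial parsing: {operation} file: {file_id}"

assert (
    partial_parsing_file_msg("added", "my_project://models/a.sql")
    == "Partial parsing: added file: my_project://models/a.sql"
)
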
     
    -@dataclass
    -class PartialParsingDeletedExposure(DebugLevel, pt.PartialParsingDeletedExposure):
    -    def code(self):
    -        return "I049"
    -
    -    def message(self) -> str:
    -        return f"Partial parsing: deleted exposure {self.unique_id}"
    +# Skipped I042, I043, I044, I045, I046, I047, I048, I049
     
     
     @dataclass
    -class InvalidDisabledTargetInTestNode(WarnLevel, pt.InvalidDisabledTargetInTestNode):
    +class InvalidDisabledTargetInTestNode(DebugLevel, pt.InvalidDisabledTargetInTestNode):
         def code(self):
             return "I050"
     
         def message(self) -> str:
    -
             target_package_string = ""
    +
             if self.target_package != target_package_string:
    -            target_package_string = "in package '{}' ".format(self.target_package)
    -
    -        msg = "{} '{}' ({}) depends on a {} named '{}' {}which is disabled".format(
    -            self.resource_type_title,
    -            self.unique_id,
    -            self.original_file_path,
    -            self.target_kind,
    -            self.target_name,
    -            target_package_string,
    +            target_package_string = f"in package '{self.target_package}' "
    +
    +        msg = (
    +            f"{self.resource_type_title} '{self.unique_id}' "
    +            f"({self.original_file_path}) depends on a {self.target_kind} "
    +            f"named '{self.target_name}' {target_package_string}which is disabled"
             )
     
             return warning_tag(msg)
    @@ -1476,7 +1049,7 @@ def message(self) -> str:
     
     
     @dataclass
    -class MacroPatchNotFound(WarnLevel, pt.MacroPatchNotFound):
    +class MacroNotFoundForPatch(WarnLevel, pt.MacroNotFoundForPatch):
         def code(self):
             return "I059"
     
    @@ -1502,17 +1075,14 @@ def message(self) -> str:
                 reason = "was not found"
     
             target_package_string = ""
    +
             if self.target_package is not None:
    -            target_package_string = "in package '{}' ".format(self.target_package)
    -
    -        msg = "{} '{}' ({}) depends on a {} named '{}' {}which {}".format(
    -            self.resource_type_title,
    -            self.unique_id,
    -            self.original_file_path,
    -            self.target_kind,
    -            self.target_name,
    -            target_package_string,
    -            reason,
    +            target_package_string = f"in package '{self.target_package}' "
    +
    +        msg = (
    +            f"{self.resource_type_title} '{self.unique_id}' "
    +            f"({self.original_file_path}) depends on a {self.target_kind} "
    +            f"named '{self.target_name}' {target_package_string}which {reason}"
             )
     
             return warning_tag(msg)
    @@ -1527,6 +1097,26 @@ def message(self) -> str:
             return self.msg
     
     
    +@dataclass
    +class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo):
    +    def code(self):
    +        return "I062"
    +
    +    def message(self) -> str:
    +        # This is for the log method used in macros so msg cannot be built here
    +        return self.msg
    +
    +
    +@dataclass
    +class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug):
    +    def code(self):
    +        return "I063"
    +
    +    def message(self) -> str:
    +        # This is for the log method used in macros so msg cannot be built here
    +        return self.msg
    +
    +
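# Illustration (editor's sketch, not part of this patch): per the comments
# above, these two events relay the Jinja `log()` context method, which is
# why message() just returns `msg`. Assuming fire_event from
# dbt.events.functions and the `msg` field shown here, the dispatch is
# roughly:
from dbt.events.functions import fire_event
from dbt.events.types import JinjaLogDebug, JinjaLogInfo

fire_event(JinjaLogInfo(msg="hello from a macro"))   # {{ log(msg, info=True) }}
fire_event(JinjaLogDebug(msg="debug from a macro"))  # {{ log(msg) }}
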
     # =======================================================
     # M - Deps generation
     # =======================================================
    @@ -1538,7 +1128,7 @@ def code(self):
             return "M001"
     
         def message(self) -> str:
    -        return f"  Subdirectory specified: {self.subdir}, using sparse checkout."
    +        return f"Subdirectory specified: {self.subdir}, using sparse checkout."
     
     
     @dataclass
    @@ -1547,7 +1137,7 @@ def code(self):
             return "M002"
     
         def message(self) -> str:
    -        return f"  Checking out revision {self.revision}."
    +        return f"Checking out revision {self.revision}."
     
     
     @dataclass
    @@ -1583,7 +1173,7 @@ def code(self):
             return "M006"
     
         def message(self) -> str:
    -        return f"  Updated checkout from {self.start_sha} to {self.end_sha}."
    +        return f"Updated checkout from {self.start_sha} to {self.end_sha}."
     
     
     @dataclass
    @@ -1592,7 +1182,7 @@ def code(self):
             return "M007"
     
         def message(self) -> str:
    -        return f"  Checked out at {self.end_sha}."
    +        return f"Checked out at {self.end_sha}."
     
     
     @dataclass
    @@ -1625,26 +1215,6 @@ def message(self) -> str:
             )
     
     
    -@dataclass
    -class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo):
    -    def code(self):
    -        return "M011"
    -
    -    def message(self) -> str:
    -        # This is for the log method used in macros so msg cannot be built here
    -        return self.msg
    -
    -
    -@dataclass
    -class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug):
    -    def code(self):
    -        return "M012"
    -
    -    def message(self) -> str:
    -        # This is for the log method used in macros so msg cannot be built here
    -        return self.msg
    -
    -
     @dataclass
     class DepsNoPackagesFound(InfoLevel, pt.DepsNoPackagesFound):
         def code(self):
    @@ -1669,7 +1239,7 @@ def code(self):
             return "M015"
     
         def message(self) -> str:
    -        return f"  Installed from {self.version_name}"
    +        return f"Installed from {self.version_name}"
     
     
     @dataclass
    @@ -1678,7 +1248,7 @@ def code(self):
             return "M016"
     
         def message(self) -> str:
    -        return f"  Updated version available: {self.version_latest}"
    +        return f"Updated version available: {self.version_latest}"
     
     
     @dataclass
    @@ -1687,7 +1257,7 @@ def code(self):
             return "M017"
     
         def message(self) -> str:
    -        return "  Up to date!"
    +        return "Up to date!"
     
     
     @dataclass
    @@ -1696,7 +1266,7 @@ def code(self):
             return "M018"
     
         def message(self) -> str:
    -        return f"   and subdirectory {self.subdirectory}"
    +        return f"and subdirectory {self.subdirectory}"
     
     
     @dataclass
    @@ -1705,10 +1275,8 @@ def code(self):
             return "M019"
     
         def message(self) -> str:
    -        return "Updates available for packages: {} \
    -                \nUpdate your versions in packages.yml, then run dbt deps".format(
    -            self.packages.value
    -        )
+        return (
+            f"Updates available for packages: {self.packages.value} \n"
+            "Update your versions in packages.yml, then run dbt deps"
+        )
     
     
     @dataclass
    @@ -1865,15 +1433,6 @@ def message(self) -> str:
             return self.header
     
     
    -@dataclass
    -class SeedHeaderSeparator(InfoLevel, pt.SeedHeaderSeparator):
    -    def code(self):
    -        return "Q005"
    -
    -    def message(self) -> str:
    -        return "-" * self.len_header
    -
    -
     @dataclass
     class SQLRunnerException(DebugLevel, pt.SQLRunnerException):  # noqa
         def code(self):
    @@ -1914,17 +1473,16 @@ def message(self) -> str:
         @classmethod
         def status_to_level(cls, status):
             # The statuses come from TestStatus
    -        # TODO should this return EventLevel enum instead?
             level_lookup = {
    -            "fail": "error",
    -            "pass": "info",
    -            "warn": "warn",
    -            "error": "error",
    +            "fail": EventLevel.ERROR,
    +            "pass": EventLevel.INFO,
    +            "warn": EventLevel.WARN,
    +            "error": EventLevel.ERROR,
             }
             if status in level_lookup:
                 return level_lookup[status]
             else:
    -            return "info"
    +            return EventLevel.INFO
     
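
# Illustration (editor's sketch, not part of this patch): a self-contained
# sketch of the typed lookup introduced above; EventLevelSketch stands in for
# dbt's EventLevel enum used in the new lines:
from enum import Enum

class EventLevelSketch(str, Enum):
    DEBUG = "debug"
    INFO = "info"
    WARN = "warn"
    ERROR = "error"

def status_to_level_sketch(status: str) -> EventLevelSketch:
    return {
        "fail": EventLevelSketch.ERROR,
        "pass": EventLevelSketch.INFO,
        "warn": EventLevelSketch.WARN,
        "error": EventLevelSketch.ERROR,
    }.get(status, EventLevelSketch.INFO)

assert status_to_level_sketch("fail") is EventLevelSketch.ERROR
assert status_to_level_sketch("unknown") is EventLevelSketch.INFO
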
     
     # Skipped Q008, Q009, Q010
@@ -2046,15 +1604,14 @@ def status_to_level(cls, status):
             # The statuses come from FreshnessStatus
-        # TODO should this return EventLevel enum instead?
             level_lookup = {
    -            "runtime error": "error",
    -            "pass": "info",
    -            "warn": "warn",
    -            "error": "error",
    +            "runtime error": EventLevel.ERROR,
    +            "pass": EventLevel.INFO,
    +            "warn": EventLevel.WARN,
    +            "error": EventLevel.ERROR,
             }
             if status in level_lookup:
                 return level_lookup[status]
             else:
    -            return "info"
    +            return EventLevel.INFO
     
     
     # Skipped Q019, Q020, Q021
    @@ -2066,7 +1624,7 @@ def code(self):
             return "Q022"
     
         def message(self) -> str:
    -        msg = "CANCEL query {}".format(self.conn_name)
    +        msg = f"CANCEL query {self.conn_name}"
             return format_fancy_output_line(msg=msg, status=red("CANCEL"), index=None, total=None)
     
     
    @@ -2168,7 +1726,7 @@ def code(self):
             return "Q033"
     
         def message(self) -> str:
    -        msg = "OK hook: {}".format(self.statement)
    +        msg = f"OK hook: {self.statement}"
             return format_fancy_output_line(
                 msg=msg,
                 status=green(self.status),
    @@ -2247,20 +1805,18 @@ def message(self) -> str:
     
     
     @dataclass
    -class InternalExceptionOnRun(DebugLevel, pt.InternalExceptionOnRun):
    +class InternalErrorOnRun(DebugLevel, pt.InternalErrorOnRun):
         def code(self):
             return "W003"
     
         def message(self) -> str:
    -        prefix = "Internal error executing {}".format(self.build_path)
    +        prefix = f"Internal error executing {self.build_path}"
     
             internal_error_string = """This is an error in dbt. Please try again. If \
     the error persists, open an issue at https://github.com/dbt-labs/dbt-core
     """.strip()
     
    -        return "{prefix}\n{error}\n\n{note}".format(
    -            prefix=red(prefix), error=str(self.exc).strip(), note=internal_error_string
    -        )
+        return f"{red(prefix)}\n{str(self.exc).strip()}\n\n{internal_error_string}"
     
     
     @dataclass
    @@ -2272,8 +1828,8 @@ def message(self) -> str:
             node_description = self.build_path
             if node_description is None:
                 node_description = self.unique_id
    -        prefix = "Unhandled error while executing {}".format(node_description)
    -        return "{prefix}\n{error}".format(prefix=red(prefix), error=str(self.exc).strip())
    +        prefix = f"Unhandled error while executing {node_description}"
    +        return f"{red(prefix)}\n{str(self.exc).strip()}"
     
     
     @dataclass
    @@ -2282,7 +1838,7 @@ def code(self):
             return "W005"
     
         def message(self) -> str:
    -        return "Error releasing connection for node {}: {!s}".format(self.node_name, self.exc)
    +        return f"Error releasing connection for node {self.node_name}: {str(self.exc)}"
     
     
     @dataclass
    @@ -2357,7 +1913,7 @@ def message(self) -> str:
     
     
     @dataclass
    -class SystemStdOutMsg(DebugLevel, pt.SystemStdOutMsg):
    +class SystemStdOut(DebugLevel, pt.SystemStdOut):
         def code(self):
             return "Z007"
     
    @@ -2366,7 +1922,7 @@ def message(self) -> str:
     
     
     @dataclass
    -class SystemStdErrMsg(DebugLevel, pt.SystemStdErrMsg):
    +class SystemStdErr(DebugLevel, pt.SystemStdErr):
         def code(self):
             return "Z008"
     
    @@ -2454,13 +2010,18 @@ def message(self) -> str:
             return msg
     
     
    +# We use events to create console output, but also think of them as a sequence of important and
+# meaningful occurrences to be used for debugging and monitoring. The Formatting event helps ease
    +# the tension between these two goals by allowing empty lines, heading separators, and other
    +# formatting to be written to the console, while they can be ignored for other purposes. For
    +# general information that isn't simple formatting, the Note event should be used instead.
     @dataclass
    -class EmptyLine(InfoLevel, pt.EmptyLine):
    +class Formatting(InfoLevel, pt.Formatting):
         def code(self):
             return "Z017"
     
         def message(self) -> str:
    -        return ""
    +        return self.msg
     
     
     @dataclass
    @@ -2564,9 +2125,9 @@ def message(self) -> str:
             if self.keyboard_interrupt:
                 message = yellow("Exited because of keyboard interrupt.")
             elif self.num_errors > 0:
    -            message = red("Completed with {} and {}:".format(error_plural, warn_plural))
    +            message = red(f"Completed with {error_plural} and {warn_plural}:")
             elif self.num_warnings > 0:
    -            message = yellow("Completed with {}:".format(warn_plural))
    +            message = yellow(f"Completed with {warn_plural}:")
             else:
                 message = green("Completed successfully")
             return message
    @@ -2609,7 +2170,7 @@ def code(self):
             return "Z037"
     
         def message(self) -> str:
    -        return "  Creating symlink to local dependency."
    +        return "Creating symlink to local dependency."
     
     
     @dataclass
    @@ -2618,7 +2179,7 @@ def code(self):
             return "Z038"
     
         def message(self) -> str:
    -        return "  Symlinks are not available on this OS, copying dependency."
    +        return "Symlinks are not available on this OS, copying dependency."
     
     
     @dataclass
    @@ -2688,3 +2249,41 @@ def code(self):
         def message(self) -> str:
             # This is the message on the result object, cannot be formatted in event
             return self.msg
    +
    +
    +@dataclass
    +class DebugCmdOut(InfoLevel, pt.DebugCmdOut):
    +    def code(self):
    +        return "Z047"
    +
    +    def message(self) -> str:
    +        return self.msg
    +
    +
    +@dataclass
    +class DebugCmdResult(InfoLevel, pt.DebugCmdResult):
    +    def code(self):
    +        return "Z048"
    +
    +    def message(self) -> str:
    +        return self.msg
    +
    +
    +@dataclass
    +class ListCmdOut(InfoLevel, pt.ListCmdOut):
    +    def code(self):
    +        return "Z049"
    +
    +    def message(self) -> str:
    +        return self.msg
    +
    +
    +# The Note event provides a way to log messages which aren't likely to be useful as more structured events.
+# For console formatting text like empty lines and separator bars, use the Formatting event instead.
    +@dataclass
    +class Note(InfoLevel, pt.Note):
    +    def code(self):
    +        return "Z050"
    +
    +    def message(self) -> str:
    +        return self.msg
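
# Illustration (editor's sketch, not part of this patch): how the two roles
# divide in practice, assuming fire_event from dbt.events.functions and the
# `msg` fields shown above (the Note text is invented):
from dbt.events.functions import fire_event
from dbt.events.types import Formatting, Note

fire_event(Formatting(msg=""))        # blank line: console decoration only
fire_event(Formatting(msg="-" * 80))  # separator bar
fire_event(Note(msg="finished warming the relation cache"))  # a real occurrence
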
    diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py
    index 515ec86054b..4e7b6c9fe6a 100644
    --- a/core/dbt/exceptions.py
    +++ b/core/dbt/exceptions.py
    @@ -3,8 +3,8 @@
     import re
     from typing import Any, Dict, List, Mapping, NoReturn, Optional, Union
     
    -# from dbt.contracts.graph import ManifestNode # or ParsedNode?
     from dbt.dataclass_schema import ValidationError
    +from dbt.internal_deprecations import deprecated
     from dbt.events.functions import warn_or_error
     from dbt.events.helpers import env_secrets, scrub_secrets
     from dbt.events.types import JinjaLogWarning
    @@ -38,7 +38,7 @@ def data(self):
             }
     
     
    -class InternalException(Exception):
    +class DbtInternalError(Exception):
         def __init__(self, msg: str):
             self.stack: List = []
             self.msg = scrub_secrets(msg, env_secrets())
    @@ -79,7 +79,7 @@ def __str__(self):
             return lines[0] + "\n" + "\n".join(["  " + line for line in lines[1:]])
     
     
    -class RuntimeException(RuntimeError, Exception):
    +class DbtRuntimeError(RuntimeError, Exception):
         CODE = 10001
         MESSAGE = "Runtime error"
     
    @@ -172,72 +172,7 @@ def data(self):
             return result
     
     
    -class RPCFailureResult(RuntimeException):
    -    CODE = 10002
    -    MESSAGE = "RPC execution error"
    -
    -
    -class RPCTimeoutException(RuntimeException):
    -    CODE = 10008
    -    MESSAGE = "RPC timeout error"
    -
    -    def __init__(self, timeout: Optional[float]):
    -        super().__init__(self.MESSAGE)
    -        self.timeout = timeout
    -
    -    def data(self):
    -        result = super().data()
    -        result.update(
    -            {
    -                "timeout": self.timeout,
    -                "message": f"RPC timed out after {self.timeout}s",
    -            }
    -        )
    -        return result
    -
    -
    -class RPCKilledException(RuntimeException):
    -    CODE = 10009
    -    MESSAGE = "RPC process killed"
    -
    -    def __init__(self, signum: int):
    -        self.signum = signum
    -        self.msg = f"RPC process killed by signal {self.signum}"
    -        super().__init__(self.msg)
    -
    -    def data(self):
    -        return {
    -            "signum": self.signum,
    -            "message": self.msg,
    -        }
    -
    -
    -class RPCCompiling(RuntimeException):
    -    CODE = 10010
    -    MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status"
    -
    -    def __init__(self, msg: str = None, node=None):
    -        if msg is None:
    -            msg = "compile in progress"
    -        super().__init__(msg, node)
    -
    -
    -class RPCLoadException(RuntimeException):
    -    CODE = 10011
    -    MESSAGE = (
    -        'RPC server failed to compile project, call the "status" method for' " compile status"
    -    )
    -
    -    def __init__(self, cause: Dict[str, Any]):
    -        self.cause = cause
    -        self.msg = f'{self.MESSAGE}: {self.cause["message"]}'
    -        super().__init__(self.msg)
    -
    -    def data(self):
    -        return {"cause": self.cause, "message": self.msg}
    -
    -
    -class DatabaseException(RuntimeException):
    +class DbtDatabaseError(DbtRuntimeError):
         CODE = 10003
         MESSAGE = "Database Error"
     
    @@ -247,14 +182,14 @@ def process_stack(self):
             if hasattr(self.node, "build_path") and self.node.build_path:
                 lines.append(f"compiled Code at {self.node.build_path}")
     
    -        return lines + RuntimeException.process_stack(self)
    +        return lines + DbtRuntimeError.process_stack(self)
     
         @property
         def type(self):
             return "Database"
     
     
    -class CompilationException(RuntimeException):
    +class CompilationError(DbtRuntimeError):
         CODE = 10004
         MESSAGE = "Compilation Error"
     
    @@ -274,16 +209,16 @@ def _fix_dupe_msg(self, path_1: str, path_2: str, name: str, type_name: str) ->
                 )
     
     
    -class RecursionException(RuntimeException):
    +class RecursionError(DbtRuntimeError):
         pass
     
     
    -class ValidationException(RuntimeException):
    +class DbtValidationError(DbtRuntimeError):
         CODE = 10005
         MESSAGE = "Validation Error"
     
     
    -class ParsingException(RuntimeException):
    +class ParsingError(DbtRuntimeError):
         CODE = 10015
         MESSAGE = "Parsing Error"
     
    @@ -293,7 +228,7 @@ def type(self):
     
     
     # TODO: this isn't raised in the core codebase.  Is it raised elsewhere?
    -class JSONValidationException(ValidationException):
    +class JSONValidationError(DbtValidationError):
         def __init__(self, typename, errors):
             self.typename = typename
             self.errors = errors
    @@ -303,11 +238,11 @@ def __init__(self, typename, errors):
     
         def __reduce__(self):
             # see https://stackoverflow.com/a/36342588 for why this is necessary
    -        return (JSONValidationException, (self.typename, self.errors))
    +        return (JSONValidationError, (self.typename, self.errors))
     
     
    -class IncompatibleSchemaException(RuntimeException):
    -    def __init__(self, expected: str, found: Optional[str]):
    +class IncompatibleSchemaError(DbtRuntimeError):
    +    def __init__(self, expected: str, found: Optional[str] = None):
             self.expected = expected
             self.found = found
             self.filename = "input file"
    @@ -334,11 +269,11 @@ def get_message(self) -> str:
         MESSAGE = "Incompatible Schema"
     
     
    -class JinjaRenderingException(CompilationException):
    +class JinjaRenderingError(CompilationError):
         pass
     
     
    -class UndefinedMacroException(CompilationException):
    +class UndefinedMacroError(CompilationError):
         def __str__(self, prefix: str = "! ") -> str:
             msg = super().__str__(prefix)
             return (
    @@ -348,28 +283,16 @@ def __str__(self, prefix: str = "! ") -> str:
             )
     
     
    -class UnknownAsyncIDException(Exception):
    -    CODE = 10012
    -    MESSAGE = "RPC server got an unknown async ID"
    -
    -    def __init__(self, task_id):
    -        self.task_id = task_id
    -
    -    def __str__(self):
    -        return f"{self.MESSAGE}: {self.task_id}"
    -
    -
    -class AliasException(ValidationException):
    +class AliasError(DbtValidationError):
         pass
     
     
    -class DependencyException(Exception):
    -    # this can happen due to raise_dependency_error and its callers
    +class DependencyError(Exception):
         CODE = 10006
         MESSAGE = "Dependency Error"
     
     
    -class DbtConfigError(RuntimeException):
    +class DbtConfigError(DbtRuntimeError):
         CODE = 10007
         MESSAGE = "DBT Configuration Error"
     
    @@ -387,7 +310,7 @@ def __str__(self, prefix="! ") -> str:
                 return f"{msg}\n\nError encountered in {self.path}"
     
     
    -class FailFastException(RuntimeException):
    +class FailFastError(DbtRuntimeError):
         CODE = 10013
         MESSAGE = "FailFast Error"
     
    @@ -412,7 +335,7 @@ class DbtProfileError(DbtConfigError):
         pass
     
     
    -class SemverException(Exception):
    +class SemverError(Exception):
         def __init__(self, msg: str = None):
             self.msg = msg
             if msg is not None:
    @@ -421,22 +344,22 @@ def __init__(self, msg: str = None):
                 super().__init__()
     
     
    -class VersionsNotCompatibleException(SemverException):
    +class VersionsNotCompatibleError(SemverError):
         pass
     
     
    -class NotImplementedException(Exception):
    +class NotImplementedError(Exception):
         def __init__(self, msg: str):
             self.msg = msg
             self.formatted_msg = f"ERROR: {self.msg}"
             super().__init__(self.formatted_msg)
     
     
    -class FailedToConnectException(DatabaseException):
    +class FailedToConnectError(DbtDatabaseError):
         pass
     
     
    -class CommandError(RuntimeException):
    +class CommandError(DbtRuntimeError):
         def __init__(self, cwd: str, cmd: List[str], msg: str = "Error running command"):
             cmd_scrubbed = list(scrub_secrets(cmd_txt, env_secrets()) for cmd_txt in cmd)
             super().__init__(msg)
    @@ -483,7 +406,7 @@ def __str__(self):
             return f"{self.msg} running: {self.cmd}"
     
     
    -class InvalidConnectionException(RuntimeException):
    +class InvalidConnectionError(DbtRuntimeError):
         def __init__(self, thread_id, known: List):
             self.thread_id = thread_id
             self.known = known
    @@ -492,17 +415,17 @@ def __init__(self, thread_id, known: List):
             )
     
     
    -class InvalidSelectorException(RuntimeException):
    +class InvalidSelectorError(DbtRuntimeError):
         def __init__(self, name: str):
             self.name = name
             super().__init__(name)
     
     
    -class DuplicateYamlKeyException(CompilationException):
    +class DuplicateYamlKeyError(CompilationError):
         pass
     
     
    -class ConnectionException(Exception):
    +class ConnectionError(Exception):
         """
         There was a problem with the connection that returned a bad response,
         timed out, or resulted in a file that is corrupt.
    @@ -512,7 +435,7 @@ class ConnectionException(Exception):
     
     
     # event level exception
    -class EventCompilationException(CompilationException):
    +class EventCompilationError(CompilationError):
         def __init__(self, msg: str, node):
             self.msg = scrub_secrets(msg, env_secrets())
             self.node = node
    @@ -520,7 +443,7 @@ def __init__(self, msg: str, node):
     
     
     # compilation level exceptions
    -class GraphDependencyNotFound(CompilationException):
    +class GraphDependencyNotFoundError(CompilationError):
         def __init__(self, node, dependency: str):
             self.node = node
             self.dependency = dependency
    @@ -534,21 +457,21 @@ def get_message(self) -> str:
     # client level exceptions
     
     
    -class NoSupportedLanguagesFound(CompilationException):
    +class NoSupportedLanguagesFoundError(CompilationError):
         def __init__(self, node):
             self.node = node
             self.msg = f"No supported_languages found in materialization macro {self.node.name}"
             super().__init__(msg=self.msg)
     
     
    -class MaterializtionMacroNotUsed(CompilationException):
    +class MaterializtionMacroNotUsedError(CompilationError):
         def __init__(self, node):
             self.node = node
             self.msg = "Only materialization macros can be used with this function"
             super().__init__(msg=self.msg)
     
     
    -class UndefinedCompilation(CompilationException):
    +class UndefinedCompilationError(CompilationError):
         def __init__(self, name: str, node):
             self.name = name
             self.node = node
    @@ -556,20 +479,20 @@ def __init__(self, name: str, node):
             super().__init__(msg=self.msg)
     
     
    -class CaughtMacroExceptionWithNode(CompilationException):
    +class CaughtMacroErrorWithNodeError(CompilationError):
         def __init__(self, exc, node):
             self.exc = exc
             self.node = node
             super().__init__(msg=str(exc))
     
     
    -class CaughtMacroException(CompilationException):
    +class CaughtMacroError(CompilationError):
         def __init__(self, exc):
             self.exc = exc
             super().__init__(msg=str(exc))
     
     
    -class MacroNameNotString(CompilationException):
    +class MacroNameNotStringError(CompilationError):
         def __init__(self, kwarg_value):
             self.kwarg_value = kwarg_value
             super().__init__(msg=self.get_message())
    @@ -582,7 +505,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class MissingControlFlowStartTag(CompilationException):
    +class MissingControlFlowStartTagError(CompilationError):
         def __init__(self, tag, expected_tag: str, tag_parser):
             self.tag = tag
             self.expected_tag = expected_tag
    @@ -598,7 +521,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class UnexpectedControlFlowEndTag(CompilationException):
    +class UnexpectedControlFlowEndTagError(CompilationError):
         def __init__(self, tag, expected_tag: str, tag_parser):
             self.tag = tag
             self.expected_tag = expected_tag
    @@ -614,7 +537,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class UnexpectedMacroEOF(CompilationException):
    +class UnexpectedMacroEOFError(CompilationError):
         def __init__(self, expected_name: str, actual_name: str):
             self.expected_name = expected_name
             self.actual_name = actual_name
    @@ -625,7 +548,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class MacroNamespaceNotString(CompilationException):
    +class MacroNamespaceNotStringError(CompilationError):
         def __init__(self, kwarg_type: Any):
             self.kwarg_type = kwarg_type
             super().__init__(msg=self.get_message())
    @@ -638,7 +561,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class NestedTags(CompilationException):
    +class NestedTagsError(CompilationError):
         def __init__(self, outer, inner):
             self.outer = outer
             self.inner = inner
    @@ -653,7 +576,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class BlockDefinitionNotAtTop(CompilationException):
    +class BlockDefinitionNotAtTopError(CompilationError):
         def __init__(self, tag_parser, tag_start):
             self.tag_parser = tag_parser
             self.tag_start = tag_start
    @@ -668,7 +591,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class MissingCloseTag(CompilationException):
    +class MissingCloseTagError(CompilationError):
         def __init__(self, block_type_name: str, linecount: int):
             self.block_type_name = block_type_name
             self.linecount = linecount
    @@ -679,7 +602,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class GitCloningProblem(RuntimeException):
    +class UnknownGitCloningProblemError(DbtRuntimeError):
         def __init__(self, repo: str):
             self.repo = scrub_secrets(repo, env_secrets())
             super().__init__(msg=self.get_message())
    @@ -692,7 +615,19 @@ def get_message(self) -> str:
             return msg
     
     
    -class GitCloningError(InternalException):
    +class BadSpecError(DbtInternalError):
    +    def __init__(self, repo, revision, error):
    +        self.repo = repo
    +        self.revision = revision
    +        self.stderr = scrub_secrets(error.stderr.strip(), env_secrets())
    +        super().__init__(msg=self.get_message())
    +
    +    def get_message(self) -> str:
    +        msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}"
    +        return msg
    +
    +
    +class GitCloningError(DbtInternalError):
         def __init__(self, repo: str, revision: str, error: CommandResultError):
             self.repo = repo
             self.revision = revision
    @@ -711,19 +646,11 @@ def get_message(self) -> str:
             return scrub_secrets(msg, env_secrets())
     
     
    -class GitCheckoutError(InternalException):
    -    def __init__(self, repo: str, revision: str, error: CommandResultError):
    -        self.repo = repo
    -        self.revision = revision
    -        self.stderr = error.stderr.strip()
    -        super().__init__(msg=self.get_message())
    -
    -    def get_message(self) -> str:
    -        msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}"
    -        return scrub_secrets(msg, env_secrets())
    +class GitCheckoutError(BadSpecError):
    +    pass
     
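
# Illustration (editor's sketch, not part of this patch): with
# GitCheckoutError now reusing BadSpecError, a checkout failure is raised
# directly from the captured command result; run_git is a hypothetical
# helper standing in for dbt's git invocation:
from dbt.exceptions import CommandResultError, GitCheckoutError

def checkout_sketch(repo: str, revision: str) -> None:
    try:
        run_git(["checkout", revision], cwd=repo)  # hypothetical helper
    except CommandResultError as exc:
        raise GitCheckoutError(repo=repo, revision=revision, error=exc)
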
     
    -class InvalidMaterializationArg(CompilationException):
    +class MaterializationArgError(CompilationError):
         def __init__(self, name: str, argument: str):
             self.name = name
             self.argument = argument
    @@ -734,7 +661,22 @@ def get_message(self) -> str:
             return msg
     
     
    -class SymbolicLinkError(CompilationException):
    +class OperationError(CompilationError):
    +    def __init__(self, operation_name):
    +        self.operation_name = operation_name
    +        super().__init__(msg=self.get_message())
    +
    +    def get_message(self) -> str:
    +        msg = (
    +            f"dbt encountered an error when attempting to create a {self.operation_name}. "
    +            "If this error persists, please create an issue at: \n\n"
    +            "https://github.com/dbt-labs/dbt-core"
    +        )
    +
    +        return msg
    +
    +
    +class SymbolicLinkError(CompilationError):
         def __init__(self):
             super().__init__(msg=self.get_message())
     
    @@ -749,23 +691,21 @@ def get_message(self) -> str:
     
     
     # context level exceptions
    -
    -
    -class ZipStrictWrongType(CompilationException):
    +class ZipStrictWrongTypeError(CompilationError):
         def __init__(self, exc):
             self.exc = exc
             msg = str(self.exc)
             super().__init__(msg=msg)
     
     
    -class SetStrictWrongType(CompilationException):
    +class SetStrictWrongTypeError(CompilationError):
         def __init__(self, exc):
             self.exc = exc
             msg = str(self.exc)
             super().__init__(msg=msg)
     
     
    -class LoadAgateTableValueError(CompilationException):
    +class LoadAgateTableValueError(CompilationError):
         def __init__(self, exc: ValueError, node):
             self.exc = exc
             self.node = node
    @@ -773,7 +713,7 @@ def __init__(self, exc: ValueError, node):
             super().__init__(msg=msg)
     
     
    -class LoadAgateTableNotSeed(CompilationException):
    +class LoadAgateTableNotSeedError(CompilationError):
         def __init__(self, resource_type, node):
             self.resource_type = resource_type
             self.node = node
    @@ -781,14 +721,14 @@ def __init__(self, resource_type, node):
             super().__init__(msg=msg)
     
     
    -class MacrosSourcesUnWriteable(CompilationException):
    +class MacrosSourcesUnWriteableError(CompilationError):
         def __init__(self, node):
             self.node = node
             msg = 'cannot "write" macros or sources'
             super().__init__(msg=msg)
     
     
    -class PackageNotInDeps(CompilationException):
    +class PackageNotInDepsError(CompilationError):
         def __init__(self, package_name: str, node):
             self.package_name = package_name
             self.node = node
    @@ -796,7 +736,7 @@ def __init__(self, package_name: str, node):
             super().__init__(msg=msg)
     
     
    -class OperationsCannotRefEphemeralNodes(CompilationException):
    +class OperationsCannotRefEphemeralNodesError(CompilationError):
         def __init__(self, target_name: str, node):
             self.target_name = target_name
             self.node = node
    @@ -804,7 +744,7 @@ def __init__(self, target_name: str, node):
             super().__init__(msg=msg)
     
     
    -class InvalidPersistDocsValueType(CompilationException):
    +class PersistDocsValueTypeError(CompilationError):
         def __init__(self, persist_docs: Any):
             self.persist_docs = persist_docs
             msg = (
    @@ -814,14 +754,14 @@ def __init__(self, persist_docs: Any):
             super().__init__(msg=msg)
     
     
    -class InvalidInlineModelConfig(CompilationException):
    +class InlineModelConfigError(CompilationError):
         def __init__(self, node):
             self.node = node
             msg = "Invalid inline model config"
             super().__init__(msg=msg)
     
     
    -class ConflictingConfigKeys(CompilationException):
    +class ConflictingConfigKeysError(CompilationError):
         def __init__(self, oldkey: str, newkey: str, node):
             self.oldkey = oldkey
             self.newkey = newkey
    @@ -830,7 +770,7 @@ def __init__(self, oldkey: str, newkey: str, node):
             super().__init__(msg=msg)
     
     
    -class InvalidNumberSourceArgs(CompilationException):
    +class NumberSourceArgsError(CompilationError):
         def __init__(self, args, node):
             self.args = args
             self.node = node
    @@ -838,7 +778,7 @@ def __init__(self, args, node):
             super().__init__(msg=msg)
     
     
    -class RequiredVarNotFound(CompilationException):
    +class RequiredVarNotFoundError(CompilationError):
         def __init__(self, var_name: str, merged: Dict, node):
             self.var_name = var_name
             self.merged = merged
    @@ -858,14 +798,14 @@ def get_message(self) -> str:
             return msg
     
     
    -class PackageNotFoundForMacro(CompilationException):
    +class PackageNotFoundForMacroError(CompilationError):
         def __init__(self, package_name: str):
             self.package_name = package_name
             msg = f"Could not find package '{self.package_name}'"
             super().__init__(msg=msg)
     
     
    -class DisallowSecretEnvVar(ParsingException):
    +class SecretEnvVarLocationError(ParsingError):
         def __init__(self, env_var_name: str):
             self.env_var_name = env_var_name
             super().__init__(msg=self.get_message())
    @@ -878,7 +818,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidMacroArgType(CompilationException):
    +class MacroArgTypeError(CompilationError):
         def __init__(self, method_name: str, arg_name: str, got_value: Any, expected_type):
             self.method_name = method_name
             self.arg_name = arg_name
    @@ -896,7 +836,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidBoolean(CompilationException):
    +class BooleanError(CompilationError):
         def __init__(self, return_value: Any, macro_name: str):
             self.return_value = return_value
             self.macro_name = macro_name
    @@ -910,7 +850,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class RefInvalidArgs(CompilationException):
    +class RefArgsError(CompilationError):
         def __init__(self, node, args):
             self.node = node
             self.args = args
    @@ -921,7 +861,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class MetricInvalidArgs(CompilationException):
    +class MetricArgsError(CompilationError):
         def __init__(self, node, args):
             self.node = node
             self.args = args
    @@ -932,7 +872,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class RefBadContext(CompilationException):
    +class RefBadContextError(CompilationError):
         def __init__(self, node, args):
             self.node = node
             self.args = args
    @@ -961,7 +901,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidDocArgs(CompilationException):
    +class DocArgsError(CompilationError):
         def __init__(self, node, args):
             self.node = node
             self.args = args
    @@ -972,8 +912,8 @@ def get_message(self) -> str:
             return msg
     
     
    -class DocTargetNotFound(CompilationException):
    -    def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str]):
    +class DocTargetNotFoundError(CompilationError):
    +    def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str] = None):
             self.node = node
             self.target_doc_name = target_doc_name
             self.target_doc_package = target_doc_package
    @@ -987,7 +927,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class MacroInvalidDispatchArg(CompilationException):
    +class MacroDispatchArgError(CompilationError):
         def __init__(self, macro_name: str):
             self.macro_name = macro_name
             super().__init__(msg=self.get_message())
    @@ -1006,7 +946,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class DuplicateMacroName(CompilationException):
    +class DuplicateMacroNameError(CompilationError):
         def __init__(self, node_1, node_2, namespace: str):
             self.node_1 = node_1
             self.node_2 = node_2
    @@ -1032,7 +972,7 @@ def get_message(self) -> str:
     
     
     # parser level exceptions
    -class InvalidDictParse(ParsingException):
    +class DictParseError(ParsingError):
         def __init__(self, exc: ValidationError, node):
             self.exc = exc
             self.node = node
    @@ -1040,7 +980,7 @@ def __init__(self, exc: ValidationError, node):
             super().__init__(msg=msg)
     
     
    -class InvalidConfigUpdate(ParsingException):
    +class ConfigUpdateError(ParsingError):
         def __init__(self, exc: ValidationError, node):
             self.exc = exc
             self.node = node
    @@ -1048,7 +988,7 @@ def __init__(self, exc: ValidationError, node):
             super().__init__(msg=msg)
     
     
    -class PythonParsingException(ParsingException):
    +class PythonParsingError(ParsingError):
         def __init__(self, exc: SyntaxError, node):
             self.exc = exc
             self.node = node
    @@ -1060,7 +1000,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class PythonLiteralEval(ParsingException):
    +class PythonLiteralEvalError(ParsingError):
         def __init__(self, exc: Exception, node):
             self.exc = exc
             self.node = node
    @@ -1076,14 +1016,14 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidModelConfig(ParsingException):
    +class ModelConfigError(ParsingError):
         def __init__(self, exc: ValidationError, node):
             self.msg = self.validator_error_message(exc)
             self.node = node
             super().__init__(msg=self.msg)
     
     
    -class YamlParseListFailure(ParsingException):
    +class YamlParseListError(ParsingError):
         def __init__(
             self,
             path: str,
    @@ -1108,7 +1048,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class YamlParseDictFailure(ParsingException):
    +class YamlParseDictError(ParsingError):
         def __init__(
             self,
             path: str,
    @@ -1133,8 +1073,13 @@ def get_message(self) -> str:
             return msg
     
     
    -class YamlLoadFailure(ParsingException):
    -    def __init__(self, project_name: Optional[str], path: str, exc: ValidationException):
    +class YamlLoadError(ParsingError):
    +    def __init__(
    +        self,
    +        path: str,
    +        exc: DbtValidationError,
    +        project_name: Optional[str] = None,
    +    ):
             self.project_name = project_name
             self.path = path
             self.exc = exc
    @@ -1148,49 +1093,54 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidTestConfig(ParsingException):
    +class TestConfigError(ParsingError):
         def __init__(self, exc: ValidationError, node):
             self.msg = self.validator_error_message(exc)
             self.node = node
             super().__init__(msg=self.msg)
     
     
    -class InvalidSchemaConfig(ParsingException):
    +class SchemaConfigError(ParsingError):
         def __init__(self, exc: ValidationError, node):
             self.msg = self.validator_error_message(exc)
             self.node = node
             super().__init__(msg=self.msg)
     
     
    -class InvalidSnapshopConfig(ParsingException):
    +class SnapshopConfigError(ParsingError):
         def __init__(self, exc: ValidationError, node):
             self.msg = self.validator_error_message(exc)
             self.node = node
             super().__init__(msg=self.msg)
     
     
    -class SameKeyNested(CompilationException):
    +class SameKeyNestedError(CompilationError):
         def __init__(self):
             msg = "Test cannot have the same key at the top-level and in config"
             super().__init__(msg=msg)
     
     
    -class TestArgIncludesModel(CompilationException):
    +class TestArgIncludesModelError(CompilationError):
         def __init__(self):
             msg = 'Test arguments include "model", which is a reserved argument'
             super().__init__(msg=msg)
     
     
    -class UnexpectedTestNamePattern(CompilationException):
    +class UnexpectedTestNamePatternError(CompilationError):
         def __init__(self, test_name: str):
             self.test_name = test_name
             msg = f"Test name string did not match expected pattern: {self.test_name}"
             super().__init__(msg=msg)
     
     
    -class CustomMacroPopulatingConfigValues(CompilationException):
    +class CustomMacroPopulatingConfigValueError(CompilationError):
         def __init__(
    -        self, target_name: str, column_name: Optional[str], name: str, key: str, err_msg: str
    +        self,
    +        target_name: str,
    +        name: str,
    +        key: str,
    +        err_msg: str,
    +        column_name: Optional[str] = None,
         ):
             self.target_name = target_name
             self.column_name = column_name
    @@ -1220,21 +1170,21 @@ def get_message(self) -> str:
             return msg
     
     
    -class TagsNotListOfStrings(CompilationException):
    +class TagsNotListOfStringsError(CompilationError):
         def __init__(self, tags: Any):
             self.tags = tags
             msg = f"got {self.tags} ({type(self.tags)}) for tags, expected a list of strings"
             super().__init__(msg=msg)
     
     
    -class TagNotString(CompilationException):
    +class TagNotStringError(CompilationError):
         def __init__(self, tag: Any):
             self.tag = tag
             msg = f"got {self.tag} ({type(self.tag)}) for tag, expected a str"
             super().__init__(msg=msg)
     
     
    -class TestNameNotString(ParsingException):
    +class TestNameNotStringError(ParsingError):
         def __init__(self, test_name: Any):
             self.test_name = test_name
             super().__init__(msg=self.get_message())
    @@ -1245,7 +1195,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class TestArgsNotDict(ParsingException):
    +class TestArgsNotDictError(ParsingError):
         def __init__(self, test_args: Any):
             self.test_args = test_args
             super().__init__(msg=self.get_message())
    @@ -1256,7 +1206,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class TestDefinitionDictLength(ParsingException):
    +class TestDefinitionDictLengthError(ParsingError):
         def __init__(self, test):
             self.test = test
             super().__init__(msg=self.get_message())
    @@ -1270,7 +1220,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class TestInvalidType(ParsingException):
    +class TestTypeError(ParsingError):
         def __init__(self, test: Any):
             self.test = test
             super().__init__(msg=self.get_message())
    @@ -1281,7 +1231,7 @@ def get_message(self) -> str:
     
     
     # This is triggered across multiple files
    -class EnvVarMissing(ParsingException):
    +class EnvVarMissingError(ParsingError):
         def __init__(self, var: str):
             self.var = var
             super().__init__(msg=self.get_message())
    @@ -1291,7 +1241,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class TargetNotFound(CompilationException):
    +class TargetNotFoundError(CompilationError):
         def __init__(
             self,
             node,
    @@ -1330,7 +1280,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class DuplicateSourcePatchName(CompilationException):
    +class DuplicateSourcePatchNameError(CompilationError):
         def __init__(self, patch_1, patch_2):
             self.patch_1 = patch_1
             self.patch_2 = patch_2
    @@ -1352,7 +1302,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class DuplicateMacroPatchName(CompilationException):
    +class DuplicateMacroPatchNameError(CompilationError):
         def __init__(self, patch_1, existing_patch_path):
             self.patch_1 = patch_1
             self.existing_patch_path = existing_patch_path
    @@ -1373,7 +1323,7 @@ def get_message(self) -> str:
     
     
     # core level exceptions
    -class DuplicateAlias(AliasException):
    +class DuplicateAliasError(AliasError):
         def __init__(self, kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str):
             self.kwargs = kwargs
             self.aliases = aliases
    @@ -1390,9 +1340,7 @@ def get_message(self) -> str:
     
     
     # Postgres Exceptions
    -
    -
    -class UnexpectedDbReference(NotImplementedException):
    +class UnexpectedDbReferenceError(NotImplementedError):
         def __init__(self, adapter, database, expected):
             self.adapter = adapter
             self.database = database
    @@ -1404,7 +1352,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class CrossDbReferenceProhibited(CompilationException):
    +class CrossDbReferenceProhibitedError(CompilationError):
         def __init__(self, adapter, exc_msg: str):
             self.adapter = adapter
             self.exc_msg = exc_msg
    @@ -1415,7 +1363,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class IndexConfigNotDict(CompilationException):
    +class IndexConfigNotDictError(CompilationError):
         def __init__(self, raw_index: Any):
             self.raw_index = raw_index
             super().__init__(msg=self.get_message())
    @@ -1429,7 +1377,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidIndexConfig(CompilationException):
    +class IndexConfigError(CompilationError):
         def __init__(self, exc: TypeError):
             self.exc = exc
             super().__init__(msg=self.get_message())
    @@ -1441,7 +1389,7 @@ def get_message(self) -> str:
     
     
     # adapters exceptions
    -class InvalidMacroResult(CompilationException):
    +class MacroResultError(CompilationError):
         def __init__(self, freshness_macro_name: str, table):
             self.freshness_macro_name = freshness_macro_name
             self.table = table
    @@ -1453,7 +1401,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class SnapshotTargetNotSnapshotTable(CompilationException):
    +class SnapshotTargetNotSnapshotTableError(CompilationError):
         def __init__(self, missing: List):
             self.missing = missing
             super().__init__(msg=self.get_message())
    @@ -1465,7 +1413,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class SnapshotTargetIncomplete(CompilationException):
    +class SnapshotTargetIncompleteError(CompilationError):
         def __init__(self, extra: List, missing: List):
             self.extra = extra
             self.missing = missing
    @@ -1481,7 +1429,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class RenameToNoneAttempted(CompilationException):
    +class RenameToNoneAttemptedError(CompilationError):
         def __init__(self, src_name: str, dst_name: str, name: str):
             self.src_name = src_name
             self.dst_name = dst_name
    @@ -1490,21 +1438,21 @@ def __init__(self, src_name: str, dst_name: str, name: str):
             super().__init__(msg=self.msg)
     
     
    -class NullRelationDropAttempted(CompilationException):
    +class NullRelationDropAttemptedError(CompilationError):
         def __init__(self, name: str):
             self.name = name
             self.msg = f"Attempted to drop a null relation for {self.name}"
             super().__init__(msg=self.msg)
     
     
    -class NullRelationCacheAttempted(CompilationException):
    +class NullRelationCacheAttemptedError(CompilationError):
         def __init__(self, name: str):
             self.name = name
             self.msg = f"Attempted to cache a null relation for {self.name}"
             super().__init__(msg=self.msg)
     
     
    -class InvalidQuoteConfigType(CompilationException):
    +class QuoteConfigTypeError(CompilationError):
         def __init__(self, quote_config: Any):
             self.quote_config = quote_config
             super().__init__(msg=self.get_message())
    @@ -1517,7 +1465,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class MultipleDatabasesNotAllowed(CompilationException):
    +class MultipleDatabasesNotAllowedError(CompilationError):
         def __init__(self, databases):
             self.databases = databases
             super().__init__(msg=self.get_message())
    @@ -1527,26 +1475,25 @@ def get_message(self) -> str:
             return msg
     
     
    -class RelationTypeNull(CompilationException):
    +class RelationTypeNullError(CompilationError):
         def __init__(self, relation):
             self.relation = relation
             self.msg = f"Tried to drop relation {self.relation}, but its type is null."
             super().__init__(msg=self.msg)
     
     
    -class MaterializationNotAvailable(CompilationException):
    -    def __init__(self, model, adapter_type: str):
    -        self.model = model
    +class MaterializationNotAvailableError(CompilationError):
    +    def __init__(self, materialization, adapter_type: str):
    +        self.materialization = materialization
             self.adapter_type = adapter_type
             super().__init__(msg=self.get_message())
     
         def get_message(self) -> str:
    -        materialization = self.model.get_materialization()
    -        msg = f"Materialization '{materialization}' is not available for {self.adapter_type}!"
    +        msg = f"Materialization '{self.materialization}' is not available for {self.adapter_type}!"
             return msg
     
     
    -class RelationReturnedMultipleResults(CompilationException):
    +class RelationReturnedMultipleResultsError(CompilationError):
         def __init__(self, kwargs: Mapping[str, Any], matches: List):
             self.kwargs = kwargs
             self.matches = matches
    @@ -1561,7 +1508,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class ApproximateMatch(CompilationException):
    +class ApproximateMatchError(CompilationError):
         def __init__(self, target, relation):
             self.target = target
             self.relation = relation
    @@ -1579,8 +1526,7 @@ def get_message(self) -> str:
             return msg
     
     
    -# adapters exceptions
    -class UnexpectedNull(DatabaseException):
    +class UnexpectedNullError(DbtDatabaseError):
         def __init__(self, field_name: str, source):
             self.field_name = field_name
             self.source = source
    @@ -1591,7 +1537,7 @@ def __init__(self, field_name: str, source):
             super().__init__(msg)
     
     
    -class UnexpectedNonTimestamp(DatabaseException):
    +class UnexpectedNonTimestampError(DbtDatabaseError):
         def __init__(self, field_name: str, source, dt: Any):
             self.field_name = field_name
             self.source = source
    @@ -1604,7 +1550,7 @@ def __init__(self, field_name: str, source, dt: Any):
     
     
     # deps exceptions
    -class MultipleVersionGitDeps(DependencyException):
    +class MultipleVersionGitDepsError(DependencyError):
         def __init__(self, git: str, requested):
             self.git = git
             self.requested = requested
    @@ -1615,7 +1561,7 @@ def __init__(self, git: str, requested):
             super().__init__(msg)
     
     
    -class DuplicateProjectDependency(DependencyException):
    +class DuplicateProjectDependencyError(DependencyError):
         def __init__(self, project_name: str):
             self.project_name = project_name
             msg = (
    @@ -1625,7 +1571,7 @@ def __init__(self, project_name: str):
             super().__init__(msg)
     
     
    -class DuplicateDependencyToRoot(DependencyException):
    +class DuplicateDependencyToRootError(DependencyError):
         def __init__(self, project_name: str):
             self.project_name = project_name
             msg = (
    @@ -1636,7 +1582,7 @@ def __init__(self, project_name: str):
             super().__init__(msg)
     
     
    -class MismatchedDependencyTypes(DependencyException):
    +class MismatchedDependencyTypeError(DependencyError):
         def __init__(self, new, old):
             self.new = new
             self.old = old
    @@ -1647,7 +1593,7 @@ def __init__(self, new, old):
             super().__init__(msg)
     
     
    -class PackageVersionNotFound(DependencyException):
    +class PackageVersionNotFoundError(DependencyError):
         def __init__(
             self,
             package_name: str,
    @@ -1683,7 +1629,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class PackageNotFound(DependencyException):
    +class PackageNotFoundError(DependencyError):
         def __init__(self, package_name: str):
             self.package_name = package_name
             msg = f"Package {self.package_name} was not found in the package index"
    @@ -1691,37 +1637,35 @@ def __init__(self, package_name: str):
     
     
     # config level exceptions
    -
    -
    -class ProfileConfigInvalid(DbtProfileError):
    +class ProfileConfigError(DbtProfileError):
         def __init__(self, exc: ValidationError):
             self.exc = exc
             msg = self.validator_error_message(self.exc)
             super().__init__(msg=msg)
     
     
    -class ProjectContractInvalid(DbtProjectError):
    +class ProjectContractError(DbtProjectError):
         def __init__(self, exc: ValidationError):
             self.exc = exc
             msg = self.validator_error_message(self.exc)
             super().__init__(msg=msg)
     
     
    -class ProjectContractBroken(DbtProjectError):
    +class ProjectContractBrokenError(DbtProjectError):
         def __init__(self, exc: ValidationError):
             self.exc = exc
             msg = self.validator_error_message(self.exc)
             super().__init__(msg=msg)
     
     
    -class ConfigContractBroken(DbtProjectError):
    +class ConfigContractBrokenError(DbtProjectError):
         def __init__(self, exc: ValidationError):
             self.exc = exc
             msg = self.validator_error_message(self.exc)
             super().__init__(msg=msg)
     
     
    -class NonUniquePackageName(CompilationException):
    +class NonUniquePackageNameError(CompilationError):
         def __init__(self, project_name: str):
             self.project_name = project_name
             super().__init__(msg=self.get_message())
    @@ -1736,7 +1680,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class UninstalledPackagesFound(CompilationException):
    +class UninstalledPackagesFoundError(CompilationError):
         def __init__(
             self,
             count_packages_specified: int,
    @@ -1759,22 +1703,34 @@ def get_message(self) -> str:
             return msg
     
     
    -class VarsArgNotYamlDict(CompilationException):
    -    def __init__(self, var_type):
    +class OptionNotYamlDictError(CompilationError):
    +    def __init__(self, var_type, option_name):
             self.var_type = var_type
    +        self.option_name = option_name
             super().__init__(msg=self.get_message())
     
         def get_message(self) -> str:
             type_name = self.var_type.__name__
     
    -        msg = f"The --vars argument must be a YAML dictionary, but was of type '{type_name}'"
    +        msg = f"The --{self.option_name} argument must be a YAML dictionary, but was of type '{type_name}'"
             return msg
     
     
     # contracts level
    +class UnrecognizedCredentialTypeError(CompilationError):
    +    def __init__(self, typename: str, supported_types: List):
    +        self.typename = typename
    +        self.supported_types = supported_types
    +        super().__init__(msg=self.get_message())
    +
    +    def get_message(self) -> str:
    +        msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format(
    +            self.typename, ", ".join('"{}"'.format(t) for t in self.supported_types)
    +        )
    +        return msg
     
     
    -class DuplicateMacroInPackage(CompilationException):
    +class DuplicateMacroInPackageError(CompilationError):
         def __init__(self, macro, macro_mapping: Mapping):
             self.macro = macro
             self.macro_mapping = macro_mapping
    @@ -1803,7 +1759,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class DuplicateMaterializationName(CompilationException):
    +class DuplicateMaterializationNameError(CompilationError):
         def __init__(self, macro, other_macro):
             self.macro = macro
             self.other_macro = other_macro
    @@ -1823,7 +1779,30 @@ def get_message(self) -> str:
     
     
     # jinja exceptions
    -class MissingConfig(CompilationException):
    +class PatchTargetNotFoundError(CompilationError):
    +    def __init__(self, patches: Dict):
    +        self.patches = patches
    +        super().__init__(msg=self.get_message())
    +
    +    def get_message(self) -> str:
    +        patch_list = "\n\t".join(
    +            f"model {p.name} (referenced in path {p.original_file_path})"
    +            for p in self.patches.values()
    +        )
    +        msg = f"dbt could not find models for the following patches:\n\t{patch_list}"
    +        return msg
    +
    +
    +class MacroNotFoundError(CompilationError):
    +    def __init__(self, node, target_macro_id: str):
    +        self.node = node
    +        self.target_macro_id = target_macro_id
    +        msg = f"'{self.node.unique_id}' references macro '{self.target_macro_id}' which is not defined!"
    +
    +        super().__init__(msg=msg)
    +
    +
    +class MissingConfigError(CompilationError):
         def __init__(self, unique_id: str, name: str):
             self.unique_id = unique_id
             self.name = name
    @@ -1833,25 +1812,24 @@ def __init__(self, unique_id: str, name: str):
             super().__init__(msg=msg)
     
     
    -class MissingMaterialization(CompilationException):
    -    def __init__(self, model, adapter_type):
    -        self.model = model
    +class MissingMaterializationError(CompilationError):
    +    def __init__(self, materialization, adapter_type):
    +        self.materialization = materialization
             self.adapter_type = adapter_type
             super().__init__(msg=self.get_message())
     
         def get_message(self) -> str:
    -        materialization = self.model.get_materialization()
     
             valid_types = "'default'"
     
             if self.adapter_type != "default":
                 valid_types = f"'default' and '{self.adapter_type}'"
     
    -        msg = f"No materialization '{materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})"
    +        msg = f"No materialization '{self.materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})"
             return msg
     
     
    -class MissingRelation(CompilationException):
    +class MissingRelationError(CompilationError):
         def __init__(self, relation, model=None):
             self.relation = relation
             self.model = model
    @@ -1859,7 +1837,7 @@ def __init__(self, relation, model=None):
             super().__init__(msg=msg)
     
     
    -class AmbiguousAlias(CompilationException):
    +class AmbiguousAliasError(CompilationError):
         def __init__(self, node_1, node_2, duped_name=None):
             self.node_1 = node_1
             self.node_2 = node_2
    @@ -1880,7 +1858,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class AmbiguousCatalogMatch(CompilationException):
    +class AmbiguousCatalogMatchError(CompilationError):
         def __init__(self, unique_id: str, match_1, match_2):
             self.unique_id = unique_id
             self.match_1 = match_1
    @@ -1904,14 +1882,14 @@ def get_message(self) -> str:
             return msg
     
     
    -class CacheInconsistency(InternalException):
    +class CacheInconsistencyError(DbtInternalError):
         def __init__(self, msg: str):
             self.msg = msg
             formatted_msg = f"Cache inconsistency detected: {self.msg}"
             super().__init__(msg=formatted_msg)
     
     
    -class NewNameAlreadyInCache(CacheInconsistency):
    +class NewNameAlreadyInCacheError(CacheInconsistencyError):
         def __init__(self, old_key: str, new_key: str):
             self.old_key = old_key
             self.new_key = new_key
    @@ -1921,21 +1899,21 @@ def __init__(self, old_key: str, new_key: str):
             super().__init__(msg)
     
     
    -class ReferencedLinkNotCached(CacheInconsistency):
    +class ReferencedLinkNotCachedError(CacheInconsistencyError):
         def __init__(self, referenced_key: str):
             self.referenced_key = referenced_key
             msg = f"in add_link, referenced link key {self.referenced_key} not in cache!"
             super().__init__(msg)
     
     
    -class DependentLinkNotCached(CacheInconsistency):
    +class DependentLinkNotCachedError(CacheInconsistencyError):
         def __init__(self, dependent_key: str):
             self.dependent_key = dependent_key
             msg = f"in add_link, dependent link key {self.dependent_key} not in cache!"
             super().__init__(msg)
     
     
    -class TruncatedModelNameCausedCollision(CacheInconsistency):
    +class TruncatedModelNameCausedCollisionError(CacheInconsistencyError):
         def __init__(self, new_key, relations: Dict):
             self.new_key = new_key
             self.relations = relations
    @@ -1962,14 +1940,14 @@ def get_message(self) -> str:
             return msg
     
     
    -class NoneRelationFound(CacheInconsistency):
    +class NoneRelationFoundError(CacheInconsistencyError):
         def __init__(self):
             msg = "in get_relations, a None relation was found in the cache!"
             super().__init__(msg)
     
     
     # this is part of the context and also raised in dbt.contracts.relation.py
    -class DataclassNotDict(CompilationException):
    +class DataclassNotDictError(CompilationError):
         def __init__(self, obj: Any):
             self.obj = obj
             super().__init__(msg=self.get_message())
    @@ -1983,7 +1961,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class DependencyNotFound(CompilationException):
    +class DependencyNotFoundError(CompilationError):
         def __init__(self, node, node_description, required_pkg):
             self.node = node
             self.node_description = node_description
    @@ -2000,7 +1978,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class DuplicatePatchPath(CompilationException):
    +class DuplicatePatchPathError(CompilationError):
         def __init__(self, patch_1, existing_patch_path):
             self.patch_1 = patch_1
             self.existing_patch_path = existing_patch_path
    @@ -2022,8 +2000,8 @@ def get_message(self) -> str:
             return msg
     
     
    -# should this inherit ParsingException instead?
    -class DuplicateResourceName(CompilationException):
    +# should this inherit ParsingError instead?
    +class DuplicateResourceNameError(CompilationError):
         def __init__(self, node_1, node_2):
             self.node_1 = node_1
             self.node_2 = node_2
    @@ -2075,7 +2053,7 @@ def get_message(self) -> str:
             return msg
     
     
    -class InvalidPropertyYML(CompilationException):
    +class PropertyYMLError(CompilationError):
         def __init__(self, path: str, issue: str):
             self.path = path
             self.issue = issue
    @@ -2090,14 +2068,14 @@ def get_message(self) -> str:
             return msg
     
     
    -class PropertyYMLMissingVersion(InvalidPropertyYML):
    +class PropertyYMLMissingVersionError(PropertyYMLError):
         def __init__(self, path: str):
             self.path = path
             self.issue = f"the yml property file {self.path} is missing a version tag"
             super().__init__(self.path, self.issue)
     
     
    -class PropertyYMLVersionNotInt(InvalidPropertyYML):
    +class PropertyYMLVersionNotIntError(PropertyYMLError):
         def __init__(self, path: str, version: Any):
             self.path = path
             self.version = version
    @@ -2108,7 +2086,7 @@ def __init__(self, path: str, version: Any):
             super().__init__(self.path, self.issue)
     
     
    -class PropertyYMLInvalidTag(InvalidPropertyYML):
    +class PropertyYMLInvalidTagError(PropertyYMLError):
         def __init__(self, path: str, version: int):
             self.path = path
             self.version = version
    @@ -2116,7 +2094,7 @@ def __init__(self, path: str, version: int):
             super().__init__(self.path, self.issue)
     
     
    -class RelationWrongType(CompilationException):
    +class RelationWrongTypeError(CompilationError):
         def __init__(self, relation, expected_type, model=None):
             self.relation = relation
             self.expected_type = expected_type
    @@ -2134,144 +2112,375 @@ def get_message(self) -> str:
             return msg
     
     
+# Not modifying these since the RPC server should be deprecated soon
    +class UnknownAsyncIDException(Exception):
    +    CODE = 10012
    +    MESSAGE = "RPC server got an unknown async ID"
    +
    +    def __init__(self, task_id):
    +        self.task_id = task_id
    +
    +    def __str__(self):
    +        return f"{self.MESSAGE}: {self.task_id}"
    +
    +
    +class RPCFailureResult(DbtRuntimeError):
    +    CODE = 10002
    +    MESSAGE = "RPC execution error"
    +
    +
    +class RPCTimeoutException(DbtRuntimeError):
    +    CODE = 10008
    +    MESSAGE = "RPC timeout error"
    +
    +    def __init__(self, timeout: Optional[float] = None):
    +        super().__init__(self.MESSAGE)
    +        self.timeout = timeout
    +
    +    def data(self):
    +        result = super().data()
    +        result.update(
    +            {
    +                "timeout": self.timeout,
    +                "message": f"RPC timed out after {self.timeout}s",
    +            }
    +        )
    +        return result
    +
    +
    +class RPCKilledException(DbtRuntimeError):
    +    CODE = 10009
    +    MESSAGE = "RPC process killed"
    +
    +    def __init__(self, signum: int):
    +        self.signum = signum
    +        self.msg = f"RPC process killed by signal {self.signum}"
    +        super().__init__(self.msg)
    +
    +    def data(self):
    +        return {
    +            "signum": self.signum,
    +            "message": self.msg,
    +        }
    +
    +
    +class RPCCompiling(DbtRuntimeError):
    +    CODE = 10010
    +    MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status"
    +
    +    def __init__(self, msg: str = None, node=None):
    +        if msg is None:
    +            msg = "compile in progress"
    +        super().__init__(msg, node)
    +
    +
    +class RPCLoadException(DbtRuntimeError):
    +    CODE = 10011
    +    MESSAGE = (
    +        'RPC server failed to compile project, call the "status" method for' " compile status"
    +    )
    +
    +    def __init__(self, cause: Dict[str, Any]):
    +        self.cause = cause
    +        self.msg = f'{self.MESSAGE}: {self.cause["message"]}'
    +        super().__init__(self.msg)
    +
    +    def data(self):
    +        return {"cause": self.cause, "message": self.msg}
    +
    +
     # These are copies of what's in dbt/context/exceptions_jinja.py to not immediately break adapters
     # utilizing these functions as exceptions.  These are direct copies to avoid circular imports.
     # They will be removed in 1 (or 2?) versions.  Issue to be created to ensure it happens.
     
     # TODO: add deprecation to functions
    +DEPRECATION_VERSION = "1.5.0"
    +SUGGESTED_ACTION = "using `raise {exception}` directly instead"
    +REASON = "See https://github.com/dbt-labs/dbt-core/issues/6393 for more details"
    +
    +
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="JinjaLogWarning"),
    +    reason=REASON,
    +)
     def warn(msg, node=None):
         warn_or_error(JinjaLogWarning(msg=msg, node_info=get_node_info()))
         return ""
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MissingConfigError"),
    +    reason=REASON,
    +)
     def missing_config(model, name) -> NoReturn:
    -    raise MissingConfig(unique_id=model.unique_id, name=name)
    +    raise MissingConfigError(unique_id=model.unique_id, name=name)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MissingMaterializationError"),
    +    reason=REASON,
    +)
     def missing_materialization(model, adapter_type) -> NoReturn:
    -    raise MissingMaterialization(model=model, adapter_type=adapter_type)
    +    materialization = model.config.materialized
    +    raise MissingMaterializationError(materialization=materialization, adapter_type=adapter_type)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MissingRelationError"),
    +    reason=REASON,
    +)
     def missing_relation(relation, model=None) -> NoReturn:
    -    raise MissingRelation(relation, model)
    +    raise MissingRelationError(relation, model)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousAliasError"),
    +    reason=REASON,
    +)
     def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn:
    -    raise AmbiguousAlias(node_1, node_2, duped_name)
    +    raise AmbiguousAliasError(node_1, node_2, duped_name)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousCatalogMatchError"),
    +    reason=REASON,
    +)
     def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn:
    -    raise AmbiguousCatalogMatch(unique_id, match_1, match_2)
    +    raise AmbiguousCatalogMatchError(unique_id, match_1, match_2)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="CacheInconsistencyError"),
    +    reason=REASON,
    +)
     def raise_cache_inconsistent(message) -> NoReturn:
    -    raise CacheInconsistency(message)
    +    raise CacheInconsistencyError(message)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DataclassNotDictError"),
    +    reason=REASON,
    +)
     def raise_dataclass_not_dict(obj) -> NoReturn:
    -    raise DataclassNotDict(obj)
    +    raise DataclassNotDictError(obj)
     
     
    -# note: this is called all over the code in addition to in jinja
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="CompilationError"),
    +    reason=REASON,
    +)
     def raise_compiler_error(msg, node=None) -> NoReturn:
    -    raise CompilationException(msg, node)
    +    raise CompilationError(msg, node)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DbtDatabaseError"),
    +    reason=REASON,
    +)
     def raise_database_error(msg, node=None) -> NoReturn:
    -    raise DatabaseException(msg, node)
    +    raise DbtDatabaseError(msg, node)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DependencyNotFoundError"),
    +    reason=REASON,
    +)
     def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn:
    -    raise DependencyNotFound(node, node_description, required_pkg)
    +    raise DependencyNotFoundError(node, node_description, required_pkg)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DependencyError"),
    +    reason=REASON,
    +)
     def raise_dependency_error(msg) -> NoReturn:
    -    raise DependencyException(scrub_secrets(msg, env_secrets()))
    +    raise DependencyError(scrub_secrets(msg, env_secrets()))
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DuplicatePatchPathError"),
    +    reason=REASON,
    +)
     def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn:
    -    raise DuplicatePatchPath(patch_1, existing_patch_path)
    +    raise DuplicatePatchPathError(patch_1, existing_patch_path)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DuplicateResourceNameError"),
    +    reason=REASON,
    +)
     def raise_duplicate_resource_name(node_1, node_2) -> NoReturn:
    -    raise DuplicateResourceName(node_1, node_2)
    +    raise DuplicateResourceNameError(node_1, node_2)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="PropertyYMLError"),
    +    reason=REASON,
    +)
     def raise_invalid_property_yml_version(path, issue) -> NoReturn:
    -    raise InvalidPropertyYML(path, issue)
    +    raise PropertyYMLError(path, issue)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="NotImplementedError"),
    +    reason=REASON,
    +)
     def raise_not_implemented(msg) -> NoReturn:
    -    raise NotImplementedException(msg)
    +    raise NotImplementedError(msg)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="RelationWrongTypeError"),
    +    reason=REASON,
    +)
     def relation_wrong_type(relation, expected_type, model=None) -> NoReturn:
    -    raise RelationWrongType(relation, expected_type, model)
    +    raise RelationWrongTypeError(relation, expected_type, model)
     
     
     # these were implemented in core so deprecating here by calling the new exception directly
    +
    +
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DuplicateAliasError"),
    +    reason=REASON,
    +)
     def raise_duplicate_alias(
         kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str
     ) -> NoReturn:
    -    raise DuplicateAlias(kwargs, aliases, canonical_key)
    +    raise DuplicateAliasError(kwargs, aliases, canonical_key)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DuplicateSourcePatchNameError"),
    +    reason=REASON,
    +)
     def raise_duplicate_source_patch_name(patch_1, patch_2):
    -    raise DuplicateSourcePatchName(patch_1, patch_2)
    +    raise DuplicateSourcePatchNameError(patch_1, patch_2)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroPatchNameError"),
    +    reason=REASON,
    +)
     def raise_duplicate_macro_patch_name(patch_1, existing_patch_path):
    -    raise DuplicateMacroPatchName(patch_1, existing_patch_path)
    +    raise DuplicateMacroPatchNameError(patch_1, existing_patch_path)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroNameError"),
    +    reason=REASON,
    +)
     def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn:
    -    raise DuplicateMacroName(node_1, node_2, namespace)
    +    raise DuplicateMacroNameError(node_1, node_2, namespace)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="ApproximateMatchError"),
    +    reason=REASON,
    +)
     def approximate_relation_match(target, relation):
    -    raise ApproximateMatch(target, relation)
    +    raise ApproximateMatchError(target, relation)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResultsError"),
    +    reason=REASON,
    +)
     def get_relation_returned_multiple_results(kwargs, matches):
    -    raise RelationReturnedMultipleResults(kwargs, matches)
    +    raise RelationReturnedMultipleResultsError(kwargs, matches)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="OperationError"),
    +    reason=REASON,
    +)
     def system_error(operation_name):
    -    # Note: This was converted for core to use SymbolicLinkError because it's the only way it was used. Maintaining flexibility here for now.
    -    msg = (
    -        f"dbt encountered an error when attempting to {operation_name}. "
    -        "If this error persists, please create an issue at: \n\n"
    -        "https://github.com/dbt-labs/dbt-core"
    -    )
    -    raise CompilationException(msg)
    +    raise OperationError(operation_name)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="InvalidMaterializationArgError"),
    +    reason=REASON,
    +)
     def invalid_materialization_argument(name, argument):
    -    raise InvalidMaterializationArg(name, argument)
    +    raise MaterializationArgError(name, argument)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="BadSpecError"),
    +    reason=REASON,
    +)
     def bad_package_spec(repo, spec, error_message):
    -    msg = f"Error checking out spec='{spec}' for repo {repo}\n{error_message}"
    -    raise InternalException(scrub_secrets(msg, env_secrets()))
    +    raise BadSpecError(spec, repo, error_message)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="CommandResultError"),
    +    reason=REASON,
    +)
     def raise_git_cloning_error(error: CommandResultError) -> NoReturn:
    -    error.cmd = list(scrub_secrets(str(error.cmd), env_secrets()))
         raise error
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="UnknownGitCloningProblemError"),
    +    reason=REASON,
    +)
     def raise_git_cloning_problem(repo) -> NoReturn:
    -    raise GitCloningProblem(repo)
    +    raise UnknownGitCloningProblemError(repo)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MacroDispatchArgError"),
    +    reason=REASON,
    +)
     def macro_invalid_dispatch_arg(macro_name) -> NoReturn:
    -    raise MacroInvalidDispatchArg(macro_name)
    +    raise MacroDispatchArgError(macro_name)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="GraphDependencyNotFoundError"),
    +    reason=REASON,
    +)
     def dependency_not_found(node, dependency):
    -    raise GraphDependencyNotFound(node, dependency)
    +    raise GraphDependencyNotFoundError(node, dependency)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="TargetNotFoundError"),
    +    reason=REASON,
    +)
     def target_not_found(
         node,
         target_name: str,
    @@ -2279,7 +2488,7 @@ def target_not_found(
         target_package: Optional[str] = None,
         disabled: Optional[bool] = None,
     ) -> NoReturn:
    -    raise TargetNotFound(
    +    raise TargetNotFoundError(
             node=node,
             target_name=target_name,
             target_kind=target_kind,
    @@ -2288,83 +2497,153 @@ def target_not_found(
         )
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DocTargetNotFoundError"),
    +    reason=REASON,
    +)
     def doc_target_not_found(
    -    model, target_doc_name: str, target_doc_package: Optional[str]
    +    model, target_doc_name: str, target_doc_package: Optional[str] = None
     ) -> NoReturn:
    -    raise DocTargetNotFound(
    +    raise DocTargetNotFoundError(
             node=model, target_doc_name=target_doc_name, target_doc_package=target_doc_package
         )
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="DocArgsError"),
    +    reason=REASON,
    +)
     def doc_invalid_args(model, args) -> NoReturn:
    -    raise InvalidDocArgs(node=model, args=args)
    +    raise DocArgsError(node=model, args=args)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="RefBadContextError"),
    +    reason=REASON,
    +)
     def ref_bad_context(model, args) -> NoReturn:
    -    raise RefBadContext(node=model, args=args)
    +    raise RefBadContextError(node=model, args=args)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MetricArgsError"),
    +    reason=REASON,
    +)
     def metric_invalid_args(model, args) -> NoReturn:
    -    raise MetricInvalidArgs(node=model, args=args)
    +    raise MetricArgsError(node=model, args=args)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="RefArgsError"),
    +    reason=REASON,
    +)
     def ref_invalid_args(model, args) -> NoReturn:
    -    raise RefInvalidArgs(node=model, args=args)
    +    raise RefArgsError(node=model, args=args)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="BooleanError"),
    +    reason=REASON,
    +)
     def invalid_bool_error(got_value, macro_name) -> NoReturn:
    -    raise InvalidBoolean(return_value=got_value, macro_name=macro_name)
    +    raise BooleanError(return_value=got_value, macro_name=macro_name)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MacroArgTypeError"),
    +    reason=REASON,
    +)
     def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn:
    -    """Raise a CompilationException when an adapter method available to macros
    +    """Raise a InvalidMacroArgType when an adapter method available to macros
         has changed.
         """
    -    raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type)
    +    raise MacroArgTypeError(method_name, arg_name, got_value, expected_type)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="SecretEnvVarLocationError"),
    +    reason=REASON,
    +)
     def disallow_secret_env_var(env_var_name) -> NoReturn:
         """Raise an error when a secret env var is referenced outside allowed
         rendering contexts"""
    -    raise DisallowSecretEnvVar(env_var_name)
    +    raise SecretEnvVarLocationError(env_var_name)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="ParsingError"),
    +    reason=REASON,
    +)
     def raise_parsing_error(msg, node=None) -> NoReturn:
    -    raise ParsingException(msg, node)
    +    raise ParsingError(msg, node)
     
     
    -# These are the exceptions functions that were not called within dbt-core but will remain here but deprecated to give a chance to rework
    -# TODO: is this valid?  Should I create a special exception class for this?
+# These are the exception functions that were not called within dbt-core. They remain
+# here, deprecated, to give adapters a chance to rework their usage.
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="UnrecognizedCredentialTypeError"),
    +    reason=REASON,
    +)
     def raise_unrecognized_credentials_type(typename, supported_types):
    -    msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format(
    -        typename, ", ".join('"{}"'.format(t) for t in supported_types)
    -    )
    -    raise CompilationException(msg)
    +    raise UnrecognizedCredentialTypeError(typename, supported_types)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="PatchTargetNotFoundError"),
    +    reason=REASON,
    +)
     def raise_patch_targets_not_found(patches):
    -    patch_list = "\n\t".join(
    -        f"model {p.name} (referenced in path {p.original_file_path})" for p in patches.values()
    -    )
    -    msg = f"dbt could not find models for the following patches:\n\t{patch_list}"
    -    raise CompilationException(msg)
    +    raise PatchTargetNotFoundError(patches)
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResultsError"),
    +    reason=REASON,
    +)
     def multiple_matching_relations(kwargs, matches):
    -    raise RelationReturnedMultipleResults(kwargs, matches)
    +    raise RelationReturnedMultipleResultsError(kwargs, matches)
     
     
    -# while this isn't in our code I wouldn't be surpised it's in adapter code
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MaterializationNotAvailableError"),
    +    reason=REASON,
    +)
     def materialization_not_available(model, adapter_type):
    -    raise MaterializationNotAvailable(model, adapter_type)
    +    materialization = model.config.materialized
    +    raise MaterializationNotAvailableError(
    +        materialization=materialization, adapter_type=adapter_type
    +    )
     
     
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action=SUGGESTED_ACTION.format(exception="MacroNotFoundError"),
    +    reason=REASON,
    +)
     def macro_not_found(model, target_macro_id):
    -    msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!"
    -    raise CompilationException(msg=msg, node=model)
    +    raise MacroNotFoundError(node=model, target_macro_id=target_macro_id)
     
     
     # adapters use this to format messages.  it should be deprecated but live on for now
    +# TODO: What should the message here be?
    +@deprecated(
    +    version=DEPRECATION_VERSION,
    +    suggested_action="Format this message in the adapter",
    +    reason="`validator_error_message` is now a mathod on DbtRuntimeError",
    +)
     def validator_error_message(exc):
         """Given a dbt.dataclass_schema.ValidationError (which is basically a
         jsonschema.ValidationError), return the relevant parts as a string
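
Note for reviewers: the `@deprecated(version=..., suggested_action=..., reason=...)` decorator applied throughout the hunk above is not defined in this file, so its behavior has to be inferred from the call sites. As a minimal sketch only — the message wording and warning category here are assumptions, not the actual dbt-core implementation — a decorator with that signature could look like:

    import functools
    import warnings

    def deprecated(version: str, suggested_action: str, reason: str = None):
        # Decorator factory: keeps the wrapped helper callable (so adapters do
        # not break outright) while emitting a DeprecationWarning on each call.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                msg = (
                    f"`{func.__name__}` is deprecated and scheduled for removal "
                    f"in version {version}; we suggest {suggested_action}."
                )
                if reason:
                    msg += f" {reason}"
                warnings.warn(msg, DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)
            return wrapper
        return decorator

With `SUGGESTED_ACTION.format(exception="ParsingError")`, the emitted message would end in "we suggest using `raise ParsingError` directly instead.", which matches how the constants above are composed.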
    diff --git a/core/dbt/flags.py b/core/dbt/flags.py
    index 14e60c834c6..e5b94c7415b 100644
    --- a/core/dbt/flags.py
    +++ b/core/dbt/flags.py
    @@ -1,7 +1,9 @@
    -import os
+# Do not import the os package itself because this module is exposed in Jinja
    +from os import name as os_name, path as os_path, getcwd as os_getcwd, getenv as os_getenv
     import multiprocessing
    +from argparse import Namespace
     
    -if os.name != "nt":
    +if os_name != "nt":
         # https://bugs.python.org/issue41567
         import multiprocessing.popen_spawn_posix  # type: ignore
     from pathlib import Path
    @@ -10,14 +12,14 @@
     # PROFILES_DIR must be set before the other flags
     # It also gets set in main.py and in set_from_args because the rpc server
     # doesn't go through exactly the same main arg processing.
    -GLOBAL_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
    -LOCAL_PROFILES_DIR = os.getcwd()
    +GLOBAL_PROFILES_DIR = os_path.join(os_path.expanduser("~"), ".dbt")
    +LOCAL_PROFILES_DIR = os_getcwd()
     # Use the current working directory if there is a profiles.yml file present there
    -if os.path.exists(Path(LOCAL_PROFILES_DIR) / Path("profiles.yml")):
    +if os_path.exists(Path(LOCAL_PROFILES_DIR) / Path("profiles.yml")):
         DEFAULT_PROFILES_DIR = LOCAL_PROFILES_DIR
     else:
         DEFAULT_PROFILES_DIR = GLOBAL_PROFILES_DIR
    -PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR))
    +PROFILES_DIR = os_path.expanduser(os_getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR))
     
     STRICT_MODE = False  # Only here for backwards compatibility
     FULL_REFRESH = False  # subcommand
    @@ -44,6 +46,7 @@
     USE_EXPERIMENTAL_PARSER = None
     VERSION_CHECK = None
     WARN_ERROR = None
    +WARN_ERROR_OPTIONS = None
     WHICH = None
     WRITE_JSON = None
     
    @@ -54,6 +57,7 @@
         "INDIRECT_SELECTION",
         "TARGET_PATH",
         "LOG_PATH",
    +    "WARN_ERROR_OPTIONS",
     ]
     
     _NON_DBT_ENV_FLAGS = ["DO_NOT_TRACK"]
    @@ -84,6 +88,7 @@
         "USE_EXPERIMENTAL_PARSER": False,
         "VERSION_CHECK": True,
         "WARN_ERROR": False,
    +    "WARN_ERROR_OPTIONS": "{}",
         "WRITE_JSON": True,
     }
     
    @@ -92,7 +97,7 @@ def env_set_truthy(key: str) -> Optional[str]:
         """Return the value if it was set to a "truthy" string value or None
         otherwise.
         """
    -    value = os.getenv(key)
    +    value = os_getenv(key)
         if not value or value.lower() in ("0", "false", "f"):
             return None
         return value
    @@ -105,7 +110,7 @@ def env_set_bool(env_value):
     
     
     def env_set_path(key: str) -> Optional[Path]:
    -    value = os.getenv(key)
    +    value = os_getenv(key)
         if value is None:
             return value
         else:
    @@ -114,7 +119,7 @@ def env_set_path(key: str) -> Optional[Path]:
     
     MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING")
     DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE")
    -FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE_STATE")
    +FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE")
     ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH")
     ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER")
     
    @@ -132,7 +137,7 @@ def set_from_args(args, user_config):
         # N.B. Multiple `globals` are purely for line length.
         # Because `global` is a parser directive (as opposed to a language construct)
         # black insists in putting them all on one line
    -    global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER
    +    global STRICT_MODE, FULL_REFRESH, WARN_ERROR, WARN_ERROR_OPTIONS, USE_EXPERIMENTAL_PARSER, STATIC_PARSER
         global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT
         global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, ANONYMOUS_USAGE_STATS
         global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, QUIET, NO_PRINT, CACHE_SELECTED_ONLY
    @@ -165,8 +170,10 @@ def set_from_args(args, user_config):
         USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config)
         VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config)
         WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config)
    +    WARN_ERROR_OPTIONS = get_flag_value("WARN_ERROR_OPTIONS", args, user_config)
         WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config)
     
    +    _check_mutually_exclusive(["WARN_ERROR", "WARN_ERROR_OPTIONS"], args, user_config)
         _set_overrides_from_env()
     
     
    @@ -183,44 +190,59 @@ def _set_overrides_from_env():
     
     
     def get_flag_value(flag, args, user_config):
    -    flag_value = _load_flag_value(flag, args, user_config)
    +    flag_value, _ = _load_flag_value(flag, args, user_config)
     
         if flag == "PRINTER_WIDTH":  # must be ints
             flag_value = int(flag_value)
         if flag == "PROFILES_DIR":
    -        flag_value = os.path.abspath(flag_value)
    +        flag_value = os_path.abspath(flag_value)
     
         return flag_value
     
     
    +def _check_mutually_exclusive(group, args, user_config):
    +    set_flag = None
    +    for flag in group:
    +        flag_set_by_user = not _flag_value_from_default(flag, args, user_config)
    +        if flag_set_by_user and set_flag:
    +            raise ValueError(f"{flag.lower()}: not allowed with argument {set_flag.lower()}")
    +        elif flag_set_by_user:
    +            set_flag = flag
    +
    +
    +def _flag_value_from_default(flag, args, user_config):
    +    _, from_default = _load_flag_value(flag, args, user_config)
    +
    +    return from_default
    +
    +
     def _load_flag_value(flag, args, user_config):
         lc_flag = flag.lower()
         flag_value = getattr(args, lc_flag, None)
         if flag_value is not None:
    -        return flag_value
    +        return flag_value, False
     
         flag_value = _get_flag_value_from_env(flag)
         if flag_value is not None:
    -        return flag_value
    +        return flag_value, False
     
         if user_config is not None and getattr(user_config, lc_flag, None) is not None:
    -        return getattr(user_config, lc_flag)
    +        return getattr(user_config, lc_flag), False
     
    -    return flag_defaults[flag]
    +    return flag_defaults[flag], True
     
     
     def _get_flag_value_from_env(flag):
         # Environment variables use pattern 'DBT_{flag name}'
         env_flag = _get_env_flag(flag)
    -    env_value = os.getenv(env_flag)
    +    env_value = os_getenv(env_flag)
         if env_value is None or env_value == "":
             return None
     
    -    env_value = env_value.lower()
         if flag in _NON_BOOLEAN_FLAGS:
             flag_value = env_value
         else:
    -        flag_value = env_set_bool(env_value)
    +        flag_value = env_set_bool(env_value.lower())
     
         return flag_value
     
    @@ -234,6 +256,7 @@ def get_flag_dict():
             "use_experimental_parser": USE_EXPERIMENTAL_PARSER,
             "static_parser": STATIC_PARSER,
             "warn_error": WARN_ERROR,
    +        "warn_error_options": WARN_ERROR_OPTIONS,
             "write_json": WRITE_JSON,
             "partial_parse": PARTIAL_PARSE,
             "use_colors": USE_COLORS,
    @@ -249,4 +272,21 @@ def get_flag_dict():
             "log_cache_events": LOG_CACHE_EVENTS,
             "quiet": QUIET,
             "no_print": NO_PRINT,
    +        "cache_selected_only": CACHE_SELECTED_ONLY,
    +        "target_path": TARGET_PATH,
    +        "log_path": LOG_PATH,
         }
    +
    +
    +# This is used by core/dbt/context/base.py to return a flag object
    +# in Jinja.
    +def get_flag_obj():
    +    new_flags = Namespace()
    +    for k, v in get_flag_dict().items():
    +        setattr(new_flags, k.upper(), v)
+    # The following 3 are CLI arguments only, so they're not full-fledged flags,
+    # but we include them in the flags object for users.
    +    setattr(new_flags, "FULL_REFRESH", FULL_REFRESH)
    +    setattr(new_flags, "STORE_FAILURES", STORE_FAILURES)
    +    setattr(new_flags, "WHICH", WHICH)
    +    return new_flags
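
Taken together, _load_flag_value now reports whether a value fell through to flag_defaults, and _check_mutually_exclusive uses that to reject combinations the user set explicitly. A rough sketch of the resulting behavior, assuming a dbt-core checkout with this patch applied (the Namespace contents are hypothetical; dbt builds args via argparse):

from argparse import Namespace
import dbt.flags as flags

# Hypothetical parsed CLI args with both mutually exclusive flags set.
args = Namespace(warn_error=True, warn_error_options='{"include": "all"}')

# A flag counts as "set by the user" when _load_flag_value's from_default
# is False, i.e. the value came from the CLI, an environment variable, or
# user_config rather than flag_defaults. Here both are user-set, so
# _check_mutually_exclusive(["WARN_ERROR", "WARN_ERROR_OPTIONS"], ...) raises:
#   ValueError: warn_error_options: not allowed with argument warn_error
flags.set_from_args(args, user_config=None)
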
    diff --git a/core/dbt/graph/cli.py b/core/dbt/graph/cli.py
    index 51464912a1b..22a65ba92dc 100644
    --- a/core/dbt/graph/cli.py
    +++ b/core/dbt/graph/cli.py
    @@ -1,4 +1,6 @@
     # special support for CLI argument parsing.
    +# TODO: Remove as part of https://github.com/dbt-labs/dbt-core/issues/6701
    +from dbt import flags
     from copy import deepcopy
     import itertools
     from dbt.clients.yaml_helper import yaml, Loader, Dumper  # noqa: F401
    @@ -6,7 +8,7 @@
     from typing import Dict, List, Optional, Tuple, Any, Union
     
     from dbt.contracts.selection import SelectorDefinition, SelectorFile
    -from dbt.exceptions import InternalException, ValidationException
    +from dbt.exceptions import DbtInternalError, DbtValidationError
     
     from .selector_spec import (
         SelectionUnion,
    @@ -43,12 +45,14 @@ def parse_union(
                     components=intersection_components,
                     expect_exists=expect_exists,
                     raw=raw_spec,
    +                indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION),
                 )
             )
         return SelectionUnion(
             components=union_components,
             expect_exists=False,
             raw=components,
    +        indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION),
         )
     
     
    @@ -80,9 +84,12 @@ def parse_difference(
             include, DEFAULT_INCLUDES, indirect_selection=IndirectSelection(indirect_selection)
         )
         excluded = parse_union_from_default(
    -        exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection.Eager
    +        exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION)
    +    )
    +    return SelectionDifference(
    +        components=[included, excluded],
    +        indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION),
         )
    -    return SelectionDifference(components=[included, excluded])
     
     
     RawDefinition = Union[str, Dict[str, Any]]
    @@ -91,15 +98,15 @@ def parse_difference(
     def _get_list_dicts(dct: Dict[str, Any], key: str) -> List[RawDefinition]:
         result: List[RawDefinition] = []
         if key not in dct:
    -        raise InternalException(f"Expected to find key {key} in dict, only found {list(dct)}")
    +        raise DbtInternalError(f"Expected to find key {key} in dict, only found {list(dct)}")
         values = dct[key]
         if not isinstance(values, list):
    -        raise ValidationException(f'Invalid value for key "{key}". Expected a list.')
    +        raise DbtValidationError(f'Invalid value for key "{key}". Expected a list.')
         for value in values:
             if isinstance(value, dict):
                 for value_key in value:
                     if not isinstance(value_key, str):
    -                    raise ValidationException(
    +                    raise DbtValidationError(
                             f'Expected all keys to "{key}" dict to be strings, '
                             f'but "{value_key}" is a "{type(value_key)}"'
                         )
    @@ -107,7 +114,7 @@ def _get_list_dicts(dct: Dict[str, Any], key: str) -> List[RawDefinition]:
             elif isinstance(value, str):
                 result.append(value)
             else:
    -            raise ValidationException(
    +            raise DbtValidationError(
                     f'Invalid value type {type(value)} in key "{key}", expected '
                     f"dict or str (value: {value})."
                 )
    @@ -137,7 +144,7 @@ def _parse_include_exclude_subdefs(
                 # do not allow multiple exclude: defs at the same level
                 if diff_arg is not None:
                     yaml_sel_cfg = yaml.dump(definition)
    -                raise ValidationException(
    +                raise DbtValidationError(
                         f"You cannot provide multiple exclude arguments to the "
                         f"same selector set operator:\n{yaml_sel_cfg}"
                     )
    @@ -179,7 +186,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe
             key = list(definition)[0]
             value = definition[key]
             if not isinstance(key, str):
    -            raise ValidationException(
    +            raise DbtValidationError(
                     f'Expected definition key to be a "str", got one of type ' f'"{type(key)}" ({key})'
                 )
             dct = {
    @@ -189,7 +196,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe
         elif definition.get("method") == "selector":
             sel_def = definition.get("value")
             if sel_def not in result:
    -            raise ValidationException(f"Existing selector definition for {sel_def} not found.")
    +            raise DbtValidationError(f"Existing selector definition for {sel_def} not found.")
             return result[definition["value"]]["definition"]
         elif "method" in definition and "value" in definition:
             dct = definition
    @@ -197,7 +204,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe
                 diff_arg = _parse_exclusions(definition, result=result)
                 dct = {k: v for k, v in dct.items() if k != "exclude"}
         else:
    -        raise ValidationException(
    +        raise DbtValidationError(
                 f'Expected either 1 key or else "method" '
                 f'and "value" keys, but got {list(definition)}'
             )
    @@ -223,7 +230,7 @@ def parse_from_definition(
             and len(definition) > 1
         ):
             keys = ",".join(definition.keys())
    -        raise ValidationException(
    +        raise DbtValidationError(
                 f"Only a single 'union' or 'intersection' key is allowed "
                 f"in a root level selector definition; found {keys}."
             )
    @@ -236,7 +243,7 @@ def parse_from_definition(
         elif isinstance(definition, dict):
             return parse_dict_definition(definition, result=result)
         else:
    -        raise ValidationException(
    +        raise DbtValidationError(
                 f"Expected to find union, intersection, str or dict, instead "
                 f"found {type(definition)}: {definition}"
             )
    diff --git a/core/dbt/graph/graph.py b/core/dbt/graph/graph.py
    index 2dda596e073..9c20750cd54 100644
    --- a/core/dbt/graph/graph.py
    +++ b/core/dbt/graph/graph.py
    @@ -2,7 +2,7 @@
     from itertools import product
     import networkx as nx  # type: ignore
     
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     
     UniqueId = NewType("UniqueId", str)
     
    @@ -27,7 +27,7 @@ def __iter__(self) -> Iterator[UniqueId]:
         def ancestors(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[UniqueId]:
             """Returns all nodes having a path to `node` in `graph`"""
             if not self.graph.has_node(node):
    -            raise InternalException(f"Node {node} not found in the graph!")
    +            raise DbtInternalError(f"Node {node} not found in the graph!")
             return {
                 child
                 for _, child in nx.bfs_edges(self.graph, node, reverse=True, depth_limit=max_depth)
    @@ -36,7 +36,7 @@ def ancestors(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[Uniq
         def descendants(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[UniqueId]:
             """Returns all nodes reachable from `node` in `graph`"""
             if not self.graph.has_node(node):
    -            raise InternalException(f"Node {node} not found in the graph!")
    +            raise DbtInternalError(f"Node {node} not found in the graph!")
             return {child for _, child in nx.bfs_edges(self.graph, node, depth_limit=max_depth)}
     
         def select_childrens_parents(self, selected: Set[UniqueId]) -> Set[UniqueId]:
    diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py
    index 3c3b9625d27..a21a9afc630 100644
    --- a/core/dbt/graph/queue.py
    +++ b/core/dbt/graph/queue.py
    @@ -40,7 +40,7 @@ def __init__(self, graph: nx.DiGraph, manifest: Manifest, selected: Set[UniqueId
             # store the 'score' of each node as a number. Lower is higher priority.
             self._scores = self._get_scores(self.graph)
             # populate the initial queue
    -        self._find_new_additions()
    +        self._find_new_additions(list(self.graph.nodes()))
             # awaits after task end
             self.some_task_done = threading.Condition(self.lock)
     
    @@ -156,12 +156,12 @@ def _already_known(self, node: UniqueId) -> bool:
             """
             return node in self.in_progress or node in self.queued
     
    -    def _find_new_additions(self) -> None:
    +    def _find_new_additions(self, candidates) -> None:
             """Find any nodes in the graph that need to be added to the internal
             queue and add them.
             """
    -        for node, in_degree in self.graph.in_degree():
    -            if not self._already_known(node) and in_degree == 0:
    +        for node in candidates:
    +            if self.graph.in_degree(node) == 0 and not self._already_known(node):
                     self.inner.put((self._scores[node], node))
                     self.queued.add(node)
     
    @@ -174,8 +174,9 @@ def mark_done(self, node_id: UniqueId) -> None:
             """
             with self.lock:
                 self.in_progress.remove(node_id)
    +            successors = list(self.graph.successors(node_id))
                 self.graph.remove_node(node_id)
    -            self._find_new_additions()
    +            self._find_new_additions(successors)
                 self.inner.task_done()
                 self.some_task_done.notify_all()
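
The queue change narrows each rescan to nodes whose in-degree could actually have changed: the constructor seeds the queue with every node, while mark_done re-checks only the finished node's successors instead of walking the whole graph. A small standalone sketch of the same idea on a bare networkx graph (not dbt's GraphQueue):

import networkx as nx

graph = nx.DiGraph([("a", "b"), ("a", "c"), ("b", "c")])
queued = set()

def find_new_additions(candidates):
    # Only inspect the candidates, not every node in the graph.
    for node in candidates:
        if graph.in_degree(node) == 0 and node not in queued:
            queued.add(node)

find_new_additions(list(graph.nodes()))   # initial population: {"a"}
successors = list(graph.successors("a"))  # capture before removing the node
graph.remove_node("a")
find_new_additions(successors)            # only "b" is newly ready
assert queued == {"a", "b"}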
     
    diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py
    index ed91596712b..fdae6327d0e 100644
    --- a/core/dbt/graph/selector.py
    +++ b/core/dbt/graph/selector.py
    @@ -9,8 +9,8 @@
     from dbt.events.types import SelectorReportInvalidSelector, NoNodesForSelectionCriteria
     from dbt.node_types import NodeType
     from dbt.exceptions import (
    -    InternalException,
    -    InvalidSelectorException,
    +    DbtInternalError,
    +    InvalidSelectorError,
     )
     from dbt.contracts.graph.nodes import GraphMemberNode
     from dbt.contracts.graph.manifest import Manifest
    @@ -78,7 +78,7 @@ def get_nodes_from_criteria(
             nodes = self.graph.nodes()
             try:
                 collected = self.select_included(nodes, spec)
    -        except InvalidSelectorException:
    +        except InvalidSelectorError:
                 valid_selectors = ", ".join(self.SELECTOR_METHODS)
                 fire_event(
                     SelectorReportInvalidSelector(
    @@ -134,7 +134,9 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId],
                 initial_direct = spec.combined(direct_sets)
                 indirect_nodes = spec.combined(indirect_sets)
     
    -            direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes)
    +            direct_nodes = self.incorporate_indirect_nodes(
    +                initial_direct, indirect_nodes, spec.indirect_selection
    +            )
     
                 if spec.expect_exists and len(direct_nodes) == 0:
                     warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw)))
    @@ -181,7 +183,7 @@ def _is_match(self, unique_id: UniqueId) -> bool:
             elif unique_id in self.manifest.metrics:
                 node = self.manifest.metrics[unique_id]
             else:
    -            raise InternalException(f"Node {unique_id} not found in the manifest!")
    +            raise DbtInternalError(f"Node {unique_id} not found in the manifest!")
             return self.node_is_match(node)
     
         def filter_selection(self, selected: Set[UniqueId]) -> Set[UniqueId]:
    @@ -197,7 +199,7 @@ def expand_selection(
         ) -> Tuple[Set[UniqueId], Set[UniqueId]]:
         # Test selection by default expands to include any implicitly/indirectly selected tests.
             # `dbt test -m model_a` also includes tests that directly depend on `model_a`.
    -        # Expansion has two modes, EAGER and CAUTIOUS.
    +        # Expansion has three modes, EAGER, CAUTIOUS and BUILDABLE.
             #
             # EAGER mode: If ANY parent is selected, select the test.
             #
    @@ -205,11 +207,22 @@ def expand_selection(
             #  - If ALL parents are selected, select the test.
             #  - If ANY parent is missing, return it separately. We'll keep it around
             #    for later and see if its other parents show up.
    +        #
    +        # BUILDABLE mode:
+        #  - If ALL parents are selected, or the parents of the test are themselves parents of the selected nodes, select the test.
    +        #  - If ANY parent is missing, return it separately. We'll keep it around
    +        #    for later and see if its other parents show up.
    +        #
         # Users can opt out of inclusive EAGER mode by passing the --indirect-selection cautious
         # CLI argument or by specifying `indirect_selection: cautious` in a YAML selector
     
             direct_nodes = set(selected)
             indirect_nodes = set()
    +        selected_and_parents = set()
    +        if indirect_selection == IndirectSelection.Buildable:
    +            selected_and_parents = selected.union(self.graph.select_parents(selected)).union(
    +                self.manifest.sources
    +            )
     
             for unique_id in self.graph.select_successors(selected):
                 if unique_id in self.manifest.nodes:
    @@ -220,14 +233,20 @@ def expand_selection(
                             node.depends_on_nodes
                         ) <= set(selected):
                             direct_nodes.add(unique_id)
    -                    # if not:
    +                    elif indirect_selection == IndirectSelection.Buildable and set(
    +                        node.depends_on_nodes
    +                    ) <= set(selected_and_parents):
    +                        direct_nodes.add(unique_id)
                         else:
                             indirect_nodes.add(unique_id)
     
             return direct_nodes, indirect_nodes
     
         def incorporate_indirect_nodes(
    -        self, direct_nodes: Set[UniqueId], indirect_nodes: Set[UniqueId] = set()
    +        self,
    +        direct_nodes: Set[UniqueId],
    +        indirect_nodes: Set[UniqueId] = set(),
    +        indirect_selection: IndirectSelection = IndirectSelection.Eager,
         ) -> Set[UniqueId]:
             # Check tests previously selected indirectly to see if ALL their
             # parents are now present.
    @@ -238,11 +257,19 @@ def incorporate_indirect_nodes(
     
             selected = set(direct_nodes)
     
    -        for unique_id in indirect_nodes:
    -            if unique_id in self.manifest.nodes:
    -                node = self.manifest.nodes[unique_id]
    -                if set(node.depends_on_nodes) <= set(selected):
    -                    selected.add(unique_id)
    +        if indirect_selection == IndirectSelection.Cautious:
    +            for unique_id in indirect_nodes:
    +                if unique_id in self.manifest.nodes:
    +                    node = self.manifest.nodes[unique_id]
    +                    if set(node.depends_on_nodes) <= set(selected):
    +                        selected.add(unique_id)
    +        elif indirect_selection == IndirectSelection.Buildable:
    +            selected_and_parents = selected.union(self.graph.select_parents(selected))
    +            for unique_id in indirect_nodes:
    +                if unique_id in self.manifest.nodes:
    +                    node = self.manifest.nodes[unique_id]
    +                    if set(node.depends_on_nodes) <= set(selected_and_parents):
    +                        selected.add(unique_id)
     
             return selected
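
A worked sketch of how BUILDABLE differs from CAUTIOUS, using plain sets in place of the manifest and graph (the model and test names are hypothetical): a test depending on model_a and model_b, with only model_b selected.

selected = {"model_b"}
parents_of_selected = {"model_a"}           # graph.select_parents(selected)
test_depends_on = {"model_a", "model_b"}    # the test's depends_on_nodes

# CAUTIOUS: ALL parents of the test must be selected -> test skipped here.
assert not test_depends_on <= selected

# BUILDABLE: parents of selected nodes also count -> test selected here,
# since model_a is a parent of the selected model_b.
selected_and_parents = selected | parents_of_selected
assert test_depends_on <= selected_and_parents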
     
    diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py
    index c77625649bc..2c73d480dae 100644
    --- a/core/dbt/graph/selector_methods.py
    +++ b/core/dbt/graph/selector_methods.py
    @@ -19,8 +19,8 @@
     )
     from dbt.contracts.state import PreviousState
     from dbt.exceptions import (
    -    InternalException,
    -    RuntimeException,
    +    DbtInternalError,
    +    DbtRuntimeError,
     )
     from dbt.node_types import NodeType
     
    @@ -207,7 +207,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
                     "`${{source_name}}.${{target_name}}`, or "
                     "`${{package_name}}.${{source_name}}.${{target_name}}"
                 ).format(selector)
    -            raise RuntimeException(msg)
    +            raise DbtRuntimeError(msg)
     
             for node, real_node in self.source_nodes(included_nodes):
                 if target_package not in (real_node.package_name, SELECTOR_GLOB):
    @@ -234,7 +234,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
                     "the form ${{exposure_name}} or "
                     "${{exposure_package.exposure_name}}"
                 ).format(selector)
    -            raise RuntimeException(msg)
    +            raise DbtRuntimeError(msg)
     
             for node, real_node in self.exposure_nodes(included_nodes):
                 if target_package not in (real_node.package_name, SELECTOR_GLOB):
    @@ -259,7 +259,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
                     "the form ${{metric_name}} or "
                     "${{metric_package.metric_name}}"
                 ).format(selector)
    -            raise RuntimeException(msg)
    +            raise DbtRuntimeError(msg)
     
             for node, real_node in self.metric_nodes(included_nodes):
                 if target_package not in (real_node.package_name, SELECTOR_GLOB):
    @@ -367,7 +367,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
             try:
                 resource_type = NodeType(selector)
             except ValueError as exc:
    -            raise RuntimeException(f'Invalid resource_type selector "{selector}"') from exc
    +            raise DbtRuntimeError(f'Invalid resource_type selector "{selector}"') from exc
             for node, real_node in self.parsed_nodes(included_nodes):
                 if real_node.resource_type == resource_type:
                     yield node
    @@ -390,7 +390,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
             elif selector in ("singular", "data"):
                 search_type = SingularTestNode
             else:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f'Invalid test type selector {selector}: expected "generic" or ' '"singular"'
                 )
     
    @@ -407,7 +407,7 @@ def __init__(self, *args, **kwargs):
         def _macros_modified(self) -> List[str]:
             # we checked in the caller!
             if self.previous_state is None or self.previous_state.manifest is None:
    -            raise InternalException("No comparison manifest in _macros_modified")
    +            raise DbtInternalError("No comparison manifest in _macros_modified")
             old_macros = self.previous_state.manifest.macros
             new_macros = self.manifest.macros
     
    @@ -496,7 +496,7 @@ def check_new(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
     
         def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             if self.previous_state is None or self.previous_state.manifest is None:
    -            raise RuntimeException("Got a state selector method, but no comparison manifest")
    +            raise DbtRuntimeError("Got a state selector method, but no comparison manifest")
     
             state_checks = {
                 # it's new if there is no old version
    @@ -514,7 +514,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
             if selector in state_checks:
                 checker = state_checks[selector]
             else:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f'Got an invalid selector "{selector}", expected one of ' f'"{list(state_checks)}"'
                 )
     
    @@ -538,7 +538,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu
     class ResultSelectorMethod(SelectorMethod):
         def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             if self.previous_state is None or self.previous_state.results is None:
    -            raise InternalException("No comparison run_results")
    +            raise DbtInternalError("No comparison run_results")
             matches = set(
                 result.unique_id for result in self.previous_state.results if result.status == selector
             )
    @@ -551,13 +551,11 @@ class SourceStatusSelectorMethod(SelectorMethod):
         def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
     
             if self.previous_state is None or self.previous_state.sources is None:
    -            raise InternalException(
    +            raise DbtInternalError(
                     "No previous state comparison freshness results in sources.json"
                 )
             elif self.previous_state.sources_current is None:
    -            raise InternalException(
    -                "No current state comparison freshness results in sources.json"
    -            )
    +            raise DbtInternalError("No current state comparison freshness results in sources.json")
     
             current_state_sources = {
                 result.unique_id: getattr(result, "max_loaded_at", 0)
    @@ -633,7 +631,7 @@ def __init__(
         def get_method(self, method: MethodName, method_arguments: List[str]) -> SelectorMethod:
     
             if method not in self.SELECTOR_METHODS:
    -            raise InternalException(
    +            raise DbtInternalError(
                     f'Method name "{method}" is a valid node selection '
                     f"method name, but it is not handled"
                 )
    diff --git a/core/dbt/graph/selector_spec.py b/core/dbt/graph/selector_spec.py
    index 991ae7fcb89..af7ae014163 100644
    --- a/core/dbt/graph/selector_spec.py
    +++ b/core/dbt/graph/selector_spec.py
    @@ -7,7 +7,7 @@
     from typing import Set, Iterator, List, Optional, Dict, Union, Any, Iterable, Tuple
     from .graph import UniqueId
     from .selector_methods import MethodName
    -from dbt.exceptions import RuntimeException, InvalidSelectorException
    +from dbt.exceptions import DbtRuntimeError, InvalidSelectorError
     
     
     RAW_SELECTOR_PATTERN = re.compile(
    @@ -24,6 +24,7 @@
     class IndirectSelection(StrEnum):
         Eager = "eager"
         Cautious = "cautious"
    +    Buildable = "buildable"
     
     
     def _probably_path(value: str):
    @@ -46,7 +47,7 @@ def _match_to_int(match: Dict[str, str], key: str) -> Optional[int]:
         try:
             return int(raw)
         except ValueError as exc:
    -        raise RuntimeException(f"Invalid node spec - could not handle parent depth {raw}") from exc
    +        raise DbtRuntimeError(f"Invalid node spec - could not handle parent depth {raw}") from exc
     
     
     SelectionSpec = Union[
    @@ -72,7 +73,7 @@ class SelectionCriteria:
     
         def __post_init__(self):
             if self.children and self.childrens_parents:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f'Invalid node spec {self.raw} - "@" prefix and "+" suffix ' "are incompatible"
                 )
     
    @@ -95,9 +96,7 @@ def parse_method(cls, groupdict: Dict[str, Any]) -> Tuple[MethodName, List[str]]
             try:
                 method_name = MethodName(method_parts[0])
             except ValueError as exc:
    -            raise InvalidSelectorException(
    -                f"'{method_parts[0]}' is not a valid method name"
    -            ) from exc
    +            raise InvalidSelectorError(f"'{method_parts[0]}' is not a valid method name") from exc
     
             method_arguments: List[str] = method_parts[1:]
     
    @@ -111,7 +110,7 @@ def selection_criteria_from_dict(
             indirect_selection: IndirectSelection = IndirectSelection.Eager,
         ) -> "SelectionCriteria":
             if "value" not in dct:
    -            raise RuntimeException(f'Invalid node spec "{raw}" - no search value!')
    +            raise DbtRuntimeError(f'Invalid node spec "{raw}" - no search value!')
             method_name, method_arguments = cls.parse_method(dct)
     
             parents_depth = _match_to_int(dct, "parents_depth")
    @@ -162,7 +161,7 @@ def from_single_spec(
             result = RAW_SELECTOR_PATTERN.match(raw)
             if result is None:
                 # bad spec!
    -            raise RuntimeException(f'Invalid selector spec "{raw}"')
    +            raise DbtRuntimeError(f'Invalid selector spec "{raw}"')
     
             return cls.selection_criteria_from_dict(
                 raw, result.groupdict(), indirect_selection=indirect_selection
    @@ -173,12 +172,14 @@ class BaseSelectionGroup(dbtClassMixin, Iterable[SelectionSpec], metaclass=ABCMe
         def __init__(
             self,
             components: Iterable[SelectionSpec],
    +        indirect_selection: IndirectSelection = IndirectSelection.Eager,
             expect_exists: bool = False,
             raw: Any = None,
         ):
             self.components: List[SelectionSpec] = list(components)
             self.expect_exists = expect_exists
             self.raw = raw
    +        self.indirect_selection = indirect_selection
     
         def __iter__(self) -> Iterator[SelectionSpec]:
             for component in self.components:
    diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py
    index a8ff90fa75f..84f253b00c6 100644
    --- a/core/dbt/helper_types.py
    +++ b/core/dbt/helper_types.py
    @@ -7,15 +7,16 @@
     from datetime import timedelta
     from pathlib import Path
     from typing import Tuple, AbstractSet, Union
    +from hologram import FieldEncoder, JsonDict
    +from mashumaro.types import SerializableType
    +from typing import Callable, cast, Generic, Optional, TypeVar, List
     
     from dbt.dataclass_schema import (
         dbtClassMixin,
         ValidationError,
         StrEnum,
     )
    -from hologram import FieldEncoder, JsonDict
    -from mashumaro.types import SerializableType
    -from typing import Callable, cast, Generic, Optional, TypeVar
    +import dbt.events.types as dbt_event_types
     
     
     class Port(int, SerializableType):
    @@ -88,6 +89,65 @@ class NoValue(dbtClassMixin):
         novalue: NVEnum = field(default_factory=lambda: NVEnum.novalue)
     
     
    +@dataclass
    +class IncludeExclude(dbtClassMixin):
    +    INCLUDE_ALL = ("all", "*")
    +
    +    include: Union[str, List[str]]
    +    exclude: List[str] = field(default_factory=list)
    +
    +    def __post_init__(self):
    +        if isinstance(self.include, str) and self.include not in self.INCLUDE_ALL:
    +            raise ValidationError(
    +                f"include must be one of {self.INCLUDE_ALL} or a list of strings"
    +            )
    +
    +        if self.exclude and self.include not in self.INCLUDE_ALL:
    +            raise ValidationError(
    +                f"exclude can only be specified if include is one of {self.INCLUDE_ALL}"
    +            )
    +
    +        if isinstance(self.include, list):
    +            self._validate_items(self.include)
    +
    +        if isinstance(self.exclude, list):
    +            self._validate_items(self.exclude)
    +
    +    def includes(self, item_name: str):
    +        return (
    +            item_name in self.include or self.include in self.INCLUDE_ALL
    +        ) and item_name not in self.exclude
    +
    +    def _validate_items(self, items: List[str]):
    +        pass
    +
    +
    +class WarnErrorOptions(IncludeExclude):
    +    # TODO: this method can be removed once the click CLI is in use
    +    @classmethod
    +    def from_yaml_string(cls, warn_error_options_str: Optional[str]):
    +
    +        # TODO: resolve circular import
    +        from dbt.config.utils import parse_cli_yaml_string
    +
    +        warn_error_options_str = (
    +            str(warn_error_options_str) if warn_error_options_str is not None else "{}"
    +        )
    +        warn_error_options = parse_cli_yaml_string(warn_error_options_str, "warn-error-options")
    +        return cls(
    +            include=warn_error_options.get("include", []),
    +            exclude=warn_error_options.get("exclude", []),
    +        )
    +
    +    def _validate_items(self, items: List[str]):
    +        valid_exception_names = set(
    +            [name for name, cls in dbt_event_types.__dict__.items() if isinstance(cls, type)]
    +        )
    +        for item in items:
    +            if item not in valid_exception_names:
    +                raise ValidationError(f"{item} is not a valid dbt error name.")
    +
    +
     dbtClassMixin.register_field_encoders(
         {
             Port: PortEncoder(),
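
A quick sketch of the new types in use, assuming a dbt-core checkout with this patch applied (the event names below are real dbt event types; the exact strings are illustrative):

from dbt.helper_types import WarnErrorOptions

opts = WarnErrorOptions.from_yaml_string(
    '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'
)
assert opts.includes("MainReportVersion")                 # covered by include: all
assert not opts.includes("NoNodesForSelectionCriteria")   # explicitly excluded

# Passing an unknown name (e.g. a hypothetical "NotARealEvent") in an
# include/exclude list trips _validate_items:
#   ValidationError: NotARealEvent is not a valid dbt error name.
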
    diff --git a/core/dbt/include/global_project/macros/adapters/freshness.sql b/core/dbt/include/global_project/macros/adapters/freshness.sql
    index 6a5bd79d1d0..f18499a2391 100644
    --- a/core/dbt/include/global_project/macros/adapters/freshness.sql
    +++ b/core/dbt/include/global_project/macros/adapters/freshness.sql
    @@ -12,5 +12,5 @@
         where {{ filter }}
         {% endif %}
       {% endcall %}
    -  {{ return(load_result('collect_freshness').table) }}
    +  {{ return(load_result('collect_freshness')) }}
     {% endmacro %}
    diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql
    index 5033178be49..ca972c9f258 100644
    --- a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql
    +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql
    @@ -1,8 +1,10 @@
    -{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
    +{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}
+  -- back compat for old kwarg name
    +  {% set incremental_predicates = kwargs.get('predicates', incremental_predicates) %}
       {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }}
     {%- endmacro %}
     
    -{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
    +{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}
         {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%}
         {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
         {%- set merge_update_columns = config.get('merge_update_columns') -%}
    diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql
    index c56ff7f31c8..64da81ae646 100644
    --- a/core/dbt/include/global_project/macros/python_model/python.sql
    +++ b/core/dbt/include/global_project/macros/python_model/python.sql
    @@ -3,7 +3,7 @@
         {%- set ref_dict = {} -%}
         {%- for _ref in model.refs -%}
             {%- set resolved = ref(*_ref) -%}
    -        {%- do ref_dict.update({_ref | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}
    +        {%- do ref_dict.update({_ref | join("."): resolved | string | replace('"', '\"')}) -%}
         {%- endfor -%}
     
     def ref(*args,dbt_load_df_function):
    @@ -18,7 +18,7 @@ def ref(*args,dbt_load_df_function):
         {%- set source_dict = {} -%}
         {%- for _source in model.sources -%}
             {%- set resolved = source(*_source) -%}
    -        {%- do source_dict.update({_source | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}
    +        {%- do source_dict.update({_source | join("."): resolved | string | replace('"', '\"')}) -%}
         {%- endfor -%}
     
     def source(*args, dbt_load_df_function):
    @@ -33,8 +33,8 @@ def source(*args, dbt_load_df_function):
         {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %}
         {%- for key, default in config_dbt_used -%}
             {# weird type testing with enum, would be much easier to write this logic in Python! #}
    -        {%- if key == 'language' -%}
    -          {%- set value = 'python' -%}
    +        {%- if key == "language" -%}
    +          {%- set value = "python" -%}
             {%- endif -%}
             {%- set value = model.config.get(key, default) -%}
             {%- do config_dict.update({key: value}) -%}
    @@ -62,11 +62,12 @@ class config:
     
     class this:
         """dbt.this() or dbt.this.identifier"""
    -    database = '{{ this.database }}'
    -    schema = '{{ this.schema }}'
    -    identifier = '{{ this.identifier }}'
    +    database = "{{ this.database }}"
    +    schema = "{{ this.schema }}"
    +    identifier = "{{ this.identifier }}"
    +    {% set this_relation_name = this | string | replace('"', '\\"') %}
         def __repr__(self):
    -        return '{{ this }}'
    +        return "{{ this_relation_name  }}"
     
     
     class dbtObj:
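
For illustration, roughly what the `this` block above renders to for a hypothetical relation "analytics"."dbt_jaffle"."my_model"; the replace('"', '\\"') escaping is what lets the quoted relation name sit safely inside a double-quoted Python string:

class this:
    """dbt.this() or dbt.this.identifier"""
    database = "analytics"
    schema = "dbt_jaffle"
    identifier = "my_model"
    def __repr__(self):
        return "\"analytics\".\"dbt_jaffle\".\"my_model\""

print(repr(this()))  # "analytics"."dbt_jaffle"."my_model"
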
    diff --git a/core/dbt/internal_deprecations.py b/core/dbt/internal_deprecations.py
    new file mode 100644
    index 00000000000..fbc435026b6
    --- /dev/null
    +++ b/core/dbt/internal_deprecations.py
    @@ -0,0 +1,26 @@
    +import functools
    +from typing import Optional
    +
    +from dbt.events.functions import warn_or_error
    +from dbt.events.types import InternalDeprecation
    +
    +
    +def deprecated(suggested_action: str, version: str, reason: Optional[str]):
    +    def inner(func):
    +        @functools.wraps(func)
    +        def wrapped(*args, **kwargs):
    +            name = func.__name__
    +
    +            warn_or_error(
    +                InternalDeprecation(
    +                    name=name,
    +                    suggested_action=suggested_action,
    +                    version=version,
    +                    reason=reason,
    +                )
    +            )  # TODO: pass in event?
    +            return func(*args, **kwargs)
    +
    +        return wrapped
    +
    +    return inner
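
A minimal sketch of applying the new decorator (the decorated function and version string are hypothetical):

from dbt.internal_deprecations import deprecated

@deprecated(suggested_action="Use new_helper() instead", version="1.4", reason=None)
def old_helper():
    return 42

# Emits an InternalDeprecation warning on each call, or raises it as an
# error when warnings are promoted (e.g. via --warn-error).
old_helper()
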
    diff --git a/core/dbt/lib.py b/core/dbt/lib.py
    index f4b9ab5be0e..2726f101b00 100644
    --- a/core/dbt/lib.py
    +++ b/core/dbt/lib.py
    @@ -4,7 +4,7 @@
     from dbt.contracts.results import RunningStatus, collect_timing_info
     from dbt.events.functions import fire_event
     from dbt.events.types import NodeCompiling, NodeExecuting
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     from dbt import flags
     from dbt.task.sql import SqlCompileRunner
     from dataclasses import dataclass
    @@ -125,7 +125,7 @@ def get_task_by_type(type):
         elif type == "run_operation":
             return RunOperationTask
     
    -    raise RuntimeException("not a valid task")
    +    raise DbtRuntimeError("not a valid task")
     
     
     def create_task(type, args, manifest, config):
    diff --git a/core/dbt/main.py b/core/dbt/main.py
    index d651c073765..e186142c937 100644
    --- a/core/dbt/main.py
    +++ b/core/dbt/main.py
    @@ -43,10 +43,7 @@
     
     from dbt.utils import ExitCodes, args_to_dict
     from dbt.config.profile import read_user_config
    -from dbt.exceptions import (
    -    Exception as dbtException,
    -    InternalException,
    -)
    +from dbt.exceptions import Exception as dbtException, DbtInternalError
     
     
     class DBTVersion(argparse.Action):
    @@ -89,7 +86,7 @@ def add_optional_argument_inverse(
         ):
             mutex_group = self.add_mutually_exclusive_group()
             if not name.startswith("--"):
    -            raise InternalException(
    +            raise DbtInternalError(
                     'cannot handle optional argument without "--" prefix: ' f'got "{name}"'
                 )
             if dest is None:
    @@ -201,7 +198,7 @@ def handle_and_check(args):
     def run_from_args(parsed):
         log_cache_events(getattr(parsed, "log_cache_events", False))
     
    -    # this will convert DbtConfigErrors into RuntimeExceptions
+    # this will convert DbtConfigErrors into DbtRuntimeErrors
         # task could be any one of the task objects
         task = parsed.cls.from_args(args=parsed)
     
    @@ -213,11 +210,13 @@ def run_from_args(parsed):
     # Why was DEBUG previously set to True here?
         setup_event_logger(log_path or "logs", "json", False, False)
     
    -    fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION))
    -    fire_event(MainReportArgs(args=args_to_dict(parsed)))
+    # For the ListTask, filter out system report logs to allow piping ls output to jq, etc.
+    if parsed.cls != list_task.ListTask:
    +        fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION))
    +        fire_event(MainReportArgs(args=args_to_dict(parsed)))
     
    -    if dbt.tracking.active_user is not None:  # mypy appeasement, always true
    -        fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state()))
    +        if dbt.tracking.active_user is not None:  # mypy appeasement, always true
    +            fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state()))
     
         results = None
         # this has been updated with project_id and adapter info removed, these will be added to new cli work
    @@ -332,7 +331,7 @@ def _build_init_subparser(subparsers, base_subparser):
             dest="skip_profile_setup",
             action="store_true",
             help="""
    -        Skip interative profile setup.
    +        Skip interactive profile setup.
             """,
         )
         sub.set_defaults(cls=init_task.InitTask, which="init", rpc_method=None)
    @@ -366,7 +365,7 @@ def _build_build_subparser(subparsers, base_subparser):
         )
         sub.add_argument(
             "--indirect-selection",
    -        choices=["eager", "cautious"],
    +        choices=["eager", "cautious", "buildable"],
             default="eager",
             dest="indirect_selection",
             help="""
    @@ -467,7 +466,7 @@ def _build_snapshot_subparser(subparsers, base_subparser):
         return sub
     
     
    -def _add_defer_argument(*subparsers):
    +def _add_defer_arguments(*subparsers):
         for sub in subparsers:
             sub.add_optional_argument_inverse(
                 "--defer",
    @@ -480,9 +479,19 @@ def _add_defer_argument(*subparsers):
                 """,
                 default=flags.DEFER_MODE,
             )
    +        sub.add_optional_argument_inverse(
    +            "--favor-state",
    +            enable_help="""
+            If set, defer to the state variable for resolving unselected nodes, even if the node exists as a database object in the current environment.
    +            """,
    +            disable_help="""
    +            If defer is set, expect standard defer behaviour.
    +            """,
    +            default=flags.FAVOR_STATE_MODE,
    +        )
     
     
    -def _add_favor_state_argument(*subparsers):
    +def _add_favor_state_arguments(*subparsers):
         for sub in subparsers:
             sub.add_optional_argument_inverse(
                 "--favor-state",
    @@ -563,7 +572,7 @@ def _build_docs_generate_subparser(subparsers, base_subparser):
             Do not run "dbt compile" as part of docs generation
             """,
         )
    -    _add_defer_argument(generate_sub)
    +    _add_defer_arguments(generate_sub)
         return generate_sub
     
     
    @@ -746,7 +755,7 @@ def _build_test_subparser(subparsers, base_subparser):
         )
         sub.add_argument(
             "--indirect-selection",
    -        choices=["eager", "cautious"],
    +        choices=["eager", "cautious", "buildable"],
             default="eager",
             dest="indirect_selection",
             help="""
    @@ -852,7 +861,7 @@ def _build_list_subparser(subparsers, base_subparser):
         )
         sub.add_argument(
             "--indirect-selection",
    -        choices=["eager", "cautious"],
    +        choices=["eager", "cautious", "buildable"],
             default="eager",
             dest="indirect_selection",
             help="""
    @@ -989,18 +998,32 @@ def parse_args(args, cls=DBTArgumentParser):
             """,
         )
     
    -    p.add_argument(
    +    warn_error_flag = p.add_mutually_exclusive_group()
    +    warn_error_flag.add_argument(
             "--warn-error",
             action="store_true",
             default=None,
             help="""
             If dbt would normally warn, instead raise an exception. Examples
    -        include --models that selects nothing, deprecations, configurations
    +        include --select that selects nothing, deprecations, configurations
             with no associated models, invalid test configurations, and missing
             sources/refs in tests.
             """,
         )
     
    +    warn_error_flag.add_argument(
    +        "--warn-error-options",
    +        default=None,
    +        help="""
    +        If dbt would normally warn, instead raise an exception based on
    +        include/exclude configuration. Examples include --select that selects
    +        nothing, deprecations, configurations with no associated models,
    +        invalid test configurations, and missing sources/refs in tests.
    +        This argument should be a YAML string, with keys 'include' or 'exclude'.
+        e.g. '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'
    +        """,
    +    )
    +
         p.add_argument(
             "--no-version-check",
             dest="version_check",
    @@ -1161,9 +1184,9 @@ def parse_args(args, cls=DBTArgumentParser):
         # list_sub sets up its own arguments.
         _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)
         # --defer
    -    _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub)
    +    _add_defer_arguments(run_sub, test_sub, build_sub, snapshot_sub, compile_sub)
         # --favor-state
    -    _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub)
    +    _add_favor_state_arguments(run_sub, test_sub, build_sub, snapshot_sub)
         # --full-refresh
         _add_table_mutability_arguments(run_sub, compile_sub, build_sub)
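
Because --warn-error and --warn-error-options live in one mutually exclusive group, argparse enforces at parse time the same constraint that flags.py enforces via _check_mutually_exclusive. A tiny standalone sketch (the parser here is hypothetical; dbt's real parser defines many more options):

import argparse

p = argparse.ArgumentParser(prog="dbt")
group = p.add_mutually_exclusive_group()
group.add_argument("--warn-error", action="store_true", default=None)
group.add_argument("--warn-error-options", default=None)

p.parse_args(["--warn-error-options", '{"include": "all"}'])  # fine
# p.parse_args(["--warn-error", "--warn-error-options", "{}"]) exits with:
#   error: argument --warn-error-options: not allowed with argument --warn-error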
     
    diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py
    index 9c245214d83..1f01aff36f1 100644
    --- a/core/dbt/parser/base.py
    +++ b/core/dbt/parser/base.py
    @@ -18,7 +18,7 @@
     from dbt.contracts.graph.manifest import Manifest
     from dbt.contracts.graph.nodes import ManifestNode, BaseNode
     from dbt.contracts.graph.unparsed import UnparsedNode, Docs
    -from dbt.exceptions import InternalException, InvalidConfigUpdate, InvalidDictParse
    +from dbt.exceptions import DbtInternalError, ConfigUpdateError, DictParseError
     from dbt import hooks
     from dbt.node_types import NodeType, ModelLanguage
     from dbt.parser.search import FileBlock
    @@ -76,7 +76,7 @@ def __init__(self, config: RuntimeConfig, manifest: Manifest, component: str) ->
                 root_project_name=config.project_name,
             )
             if macro is None:
    -            raise InternalException(f"No macro with name generate_{component}_name found")
    +            raise DbtInternalError(f"No macro with name generate_{component}_name found")
     
             root_context = generate_generate_name_macro_context(macro, config, manifest)
             self.updater = MacroGenerator(macro, root_context)
    @@ -224,7 +224,7 @@ def _create_parsetime_node(
                     original_file_path=block.path.original_file_path,
                     raw_code=block.contents,
                 )
    -            raise InvalidDictParse(exc, node=node)
    +            raise DictParseError(exc, node=node)
     
         def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]:
             return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config)
    @@ -345,7 +345,7 @@ def initial_config(self, fqn: List[str]) -> ContextConfig:
                     self.project.project_name,
                 )
             else:
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"Got an unexpected project version={config_version}, expected 2"
                 )
     
    @@ -363,7 +363,7 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None:
                 self.update_parsed_node_config(node, config, context=context)
             except ValidationError as exc:
                 # we got a ValidationError - probably bad types in config()
    -            raise InvalidConfigUpdate(exc, node=node) from exc
    +            raise ConfigUpdateError(exc, node=node) from exc
     
         def add_result_node(self, block: FileBlock, node: ManifestNode):
             if node.config.enabled:
    diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py
    index 822dd5b2d85..ea281e1c993 100644
    --- a/core/dbt/parser/generic_test.py
    +++ b/core/dbt/parser/generic_test.py
    @@ -2,7 +2,7 @@
     
     import jinja2
     
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
     from dbt.clients import jinja
     from dbt.contracts.graph.nodes import GenericTestNode, Macro
     from dbt.contracts.graph.unparsed import UnparsedMacro
    @@ -51,14 +51,14 @@ def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macr
                     )
                     if isinstance(t, jinja.BlockTag)
                 ]
    -        except ParsingException as exc:
    +        except ParsingError as exc:
                 exc.add_node(base_node)
                 raise
     
             for block in blocks:
                 try:
                     ast = jinja.parse(block.full_block)
    -            except ParsingException as e:
    +            except ParsingError as e:
                     e.add_node(base_node)
                     raise
     
    @@ -68,7 +68,7 @@ def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macr
                 if len(generic_test_nodes) != 1:
                     # things have gone disastrously wrong, we thought we only
                     # parsed one block!
    -                raise ParsingException(
    +                raise ParsingError(
                         f"Found multiple generic tests in {block.full_block}, expected 1",
                         node=base_node,
                     )
    diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py
    index af0282c953f..678f7de9df3 100644
    --- a/core/dbt/parser/generic_test_builders.py
    +++ b/core/dbt/parser/generic_test_builders.py
    @@ -22,17 +22,17 @@
         UnparsedExposure,
     )
     from dbt.exceptions import (
    -    CustomMacroPopulatingConfigValues,
    -    SameKeyNested,
    -    TagNotString,
    -    TagsNotListOfStrings,
    -    TestArgIncludesModel,
    -    TestArgsNotDict,
    -    TestDefinitionDictLength,
    -    TestInvalidType,
    -    TestNameNotString,
    -    UnexpectedTestNamePattern,
    -    UndefinedMacroException,
    +    CustomMacroPopulatingConfigValueError,
    +    SameKeyNestedError,
    +    TagNotStringError,
    +    TagsNotListOfStringsError,
    +    TestArgIncludesModelError,
    +    TestArgsNotDictError,
    +    TestDefinitionDictLengthError,
    +    TestTypeError,
    +    TestNameNotStringError,
    +    UnexpectedTestNamePatternError,
    +    UndefinedMacroError,
     )
     from dbt.parser.search import FileBlock
     
    @@ -234,7 +234,7 @@ def __init__(
             test_name, test_args = self.extract_test_args(test, column_name)
             self.args: Dict[str, Any] = test_args
             if "model" in self.args:
    -            raise TestArgIncludesModel()
    +            raise TestArgIncludesModelError()
             self.package_name: str = package_name
             self.target: Testable = target
     
    @@ -242,7 +242,7 @@ def __init__(
     
             match = self.TEST_NAME_PATTERN.match(test_name)
             if match is None:
    -            raise UnexpectedTestNamePattern(test_name)
    +            raise UnexpectedTestNamePatternError(test_name)
     
             groups = match.groupdict()
             self.name: str = groups["test_name"]
    @@ -259,20 +259,20 @@ def __init__(
                 value = self.args.pop(key, None)
                 # 'modifier' config could be either top level arg or in config
                 if value and "config" in self.args and key in self.args["config"]:
    -                raise SameKeyNested()
    +                raise SameKeyNestedError()
                 if not value and "config" in self.args:
                     value = self.args["config"].pop(key, None)
                 if isinstance(value, str):
     
                     try:
                         value = get_rendered(value, render_ctx, native=True)
    -                except UndefinedMacroException as e:
    -                    raise CustomMacroPopulatingConfigValues(
    +                except UndefinedMacroError as e:
    +                    raise CustomMacroPopulatingConfigValueError(
                             target_name=self.target.name,
                             column_name=column_name,
                             name=self.name,
                             key=key,
    -                        err_msg=e.msg
    +                        err_msg=e.msg,
                         )
     
                 if value is not None:
    @@ -310,7 +310,7 @@ def _bad_type(self) -> TypeError:
         @staticmethod
         def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]:
             if not isinstance(test, dict):
    -            raise TestInvalidType(test)
    +            raise TestTypeError(test)
     
             # If the test is a dictionary with top-level keys, the test name is "test_name"
             # and the rest are arguments
    @@ -324,13 +324,13 @@ def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]:
             else:
                 test = list(test.items())
                 if len(test) != 1:
    -                raise TestDefinitionDictLength(test)
    +                raise TestDefinitionDictLengthError(test)
                 test_name, test_args = test[0]
     
             if not isinstance(test_args, dict):
    -            raise TestArgsNotDict(test_args)
    +            raise TestArgsNotDictError(test_args)
             if not isinstance(test_name, str):
    -            raise TestNameNotString(test_name)
    +            raise TestNameNotStringError(test_name)
             test_args = deepcopy(test_args)
             if name is not None:
                 test_args["column_name"] = name
    @@ -421,10 +421,10 @@ def tags(self) -> List[str]:
             if isinstance(tags, str):
                 tags = [tags]
             if not isinstance(tags, list):
    -            raise TagsNotListOfStrings(tags)
    +            raise TagsNotListOfStringsError(tags)
             for tag in tags:
                 if not isinstance(tag, str):
    -                raise TagNotString(tag)
    +                raise TagNotStringError(tag)
             return tags[:]
     
         def macro_name(self) -> str:
    diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py
    index d05ea136dc5..d96257a0e71 100644
    --- a/core/dbt/parser/hooks.py
    +++ b/core/dbt/parser/hooks.py
    @@ -4,7 +4,7 @@
     from dbt.context.context_config import ContextConfig
     from dbt.contracts.files import FilePath
     from dbt.contracts.graph.nodes import HookNode
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.node_types import NodeType, RunHookType
     from dbt.parser.base import SimpleParser
     from dbt.parser.search import FileBlock
    @@ -46,7 +46,7 @@ def get_hook_defs(self) -> List[str]:
             elif self.hook_type == RunHookType.End:
                 hooks = self.project.on_run_end
             else:
    -            raise InternalException(
    +            raise DbtInternalError(
                     'hook_type must be one of "{}" or "{}" (got {})'.format(
                         RunHookType.Start, RunHookType.End, self.hook_type
                     )
    diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py
    index 7c5336b8ccf..1a9ee03d57d 100644
    --- a/core/dbt/parser/macros.py
    +++ b/core/dbt/parser/macros.py
    @@ -6,7 +6,7 @@
     from dbt.contracts.graph.unparsed import UnparsedMacro
     from dbt.contracts.graph.nodes import Macro
     from dbt.contracts.files import FilePath, SourceFile
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
     from dbt.events.functions import fire_event
     from dbt.events.types import MacroFileParse
     from dbt.node_types import NodeType
    @@ -56,14 +56,14 @@ def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]:
                     )
                     if isinstance(t, jinja.BlockTag)
                 ]
    -        except ParsingException as exc:
    +        except ParsingError as exc:
                 exc.add_node(base_node)
                 raise
     
             for block in blocks:
                 try:
                     ast = jinja.parse(block.full_block)
    -            except ParsingException as e:
    +            except ParsingError as e:
                     e.add_node(base_node)
                     raise
     
    @@ -72,7 +72,7 @@ def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]:
                 if len(macro_nodes) != 1:
                # things have gone disastrously wrong; we thought we only
                     # parsed one block!
    -                raise ParsingException(
    +                raise ParsingError(
                         f"Found multiple macros in {block.full_block}, expected 1", node=base_node
                     )
     
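
Both hunks in this file apply the same pattern: catch the renamed ParsingError, attach the node being parsed, and re-raise so the final message can point at a concrete macro file. A minimal sketch of that pattern (parse_block is a hypothetical helper; jinja.parse and add_node are the calls used above):

    from dbt.clients import jinja
    from dbt.exceptions import ParsingError

    def parse_block(block, base_node):
        try:
            return jinja.parse(block.full_block)
        except ParsingError as exc:
            # Attach the owning node before re-raising so the error
            # reports which file failed to parse.
            exc.add_node(base_node)
            raise
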
    diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py
    index 48eb9fca9c5..944c4ca01ee 100644
    --- a/core/dbt/parser/manifest.py
    +++ b/core/dbt/parser/manifest.py
    @@ -7,7 +7,9 @@
     from typing import Dict, Optional, Mapping, Callable, Any, List, Type, Union, Tuple
     from itertools import chain
     import time
    +from dbt.events.base_types import EventLevel
     import json
    +import pprint
     
     import dbt.exceptions
     import dbt.tracking
    @@ -22,26 +24,17 @@
     from dbt.helper_types import PathSet
     from dbt.events.functions import fire_event, get_invocation_id, warn_or_error
     from dbt.events.types import (
    +    PartialParsingErrorProcessingFile,
    +    PartialParsingError,
         ParseCmdPerfInfoPath,
    -    PartialParsingFullReparseBecauseOfError,
    -    PartialParsingExceptionFile,
    -    PartialParsingFile,
    -    PartialParsingException,
         PartialParsingSkipParsing,
    -    PartialParsingMacroChangeStartFullParse,
    -    ManifestWrongMetadataVersion,
    -    PartialParsingVersionMismatch,
    -    PartialParsingFailedBecauseConfigChange,
    -    PartialParsingFailedBecauseProfileChange,
    -    PartialParsingFailedBecauseNewProjectDependency,
    -    PartialParsingFailedBecauseHashChanged,
    +    UnableToPartialParse,
         PartialParsingNotEnabled,
         ParsedFileLoadFailed,
    -    PartialParseSaveFileNotFound,
         InvalidDisabledTargetInTestNode,
    -    PartialParsingProjectEnvVarsChanged,
    -    PartialParsingProfileEnvVarsChanged,
         NodeNotFoundOrDisabled,
    +    StateCheckVarsHash,
    +    Note,
     )
     from dbt.logger import DbtProcessState
     from dbt.node_types import NodeType
    @@ -74,7 +67,7 @@
         ResultNode,
     )
     from dbt.contracts.util import Writable
    -from dbt.exceptions import TargetNotFound, AmbiguousAlias
    +from dbt.exceptions import TargetNotFoundError, AmbiguousAliasError
     from dbt.parser.base import Parser
     from dbt.parser.analysis import AnalysisParser
     from dbt.parser.generic_test import GenericTestParser
    @@ -269,7 +262,11 @@ def load(self):
                     except Exception as exc:
                    # pp_files should still be the full set, and the manifest is the new manifest,
                         # since get_parsing_files failed
    -                    fire_event(PartialParsingFullReparseBecauseOfError())
    +                    fire_event(
    +                        UnableToPartialParse(
    +                            reason="an error occurred. Switching to full reparse."
    +                        )
    +                    )
     
                         # Get traceback info
                         tb_info = traceback.format_exc()
    @@ -293,10 +290,9 @@ def load(self):
                                 source_file = self.manifest.files[file_id]
                             if source_file:
                                 parse_file_type = source_file.parse_file_type
    -                            fire_event(PartialParsingExceptionFile(file=file_id))
    -                            fire_event(PartialParsingFile(file_id=source_file.file_id))
    +                            fire_event(PartialParsingErrorProcessingFile(file=file_id))
                         exc_info["parse_file_type"] = parse_file_type
    -                    fire_event(PartialParsingException(exc_info=exc_info))
    +                    fire_event(PartialParsingError(exc_info=exc_info))
     
                         # Send event
                         if dbt.tracking.active_user is not None:
    @@ -321,7 +317,11 @@ def load(self):
     
                 # If we're partially parsing check that certain macros have not been changed
                 if self.partially_parsing and self.skip_partial_parsing_because_of_macros():
    -                fire_event(PartialParsingMacroChangeStartFullParse())
    +                fire_event(
    +                    UnableToPartialParse(
    +                        reason="change detected to override macro. Starting full parse."
    +                    )
    +                )
     
                     # Get new Manifest with original file records and move over the macros
                     self.manifest = self.new_manifest  # contains newly read files
    @@ -553,7 +553,7 @@ def write_manifest_for_partial_parse(self):
                 # saved manifest not matching the code version.
                 if self.manifest.metadata.dbt_version != __version__:
                     fire_event(
    -                    ManifestWrongMetadataVersion(version=self.manifest.metadata.dbt_version)
    +                    UnableToPartialParse(reason="saved manifest contained the wrong version")
                     )
                     self.manifest.metadata.dbt_version = __version__
                 manifest_msgpack = self.manifest.to_msgpack()
    @@ -572,35 +572,43 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]:
     
             if manifest.metadata.dbt_version != __version__:
            # #3757: log both versions because of reports of invalid mismatch cases.
    -            fire_event(
    -                PartialParsingVersionMismatch(
    -                    saved_version=manifest.metadata.dbt_version, current_version=__version__
    -                )
    -            )
    +            fire_event(UnableToPartialParse(reason="of a version mismatch"))
                 # If the version is wrong, the other checks might not work
                 return False, ReparseReason.version_mismatch
             if self.manifest.state_check.vars_hash != manifest.state_check.vars_hash:
    -            fire_event(PartialParsingFailedBecauseConfigChange())
    +            fire_event(
    +                UnableToPartialParse(
    +                    reason="config vars, config profile, or config target have changed"
    +                )
    +            )
    +            fire_event(
    +                Note(
    +                    msg=f"previous checksum: {self.manifest.state_check.vars_hash.checksum}, current checksum: {manifest.state_check.vars_hash.checksum}"
    +                ),
    +                level=EventLevel.DEBUG,
    +            )
                 valid = False
                 reparse_reason = ReparseReason.vars_changed
             if self.manifest.state_check.profile_hash != manifest.state_check.profile_hash:
                 # Note: This should be made more granular. We shouldn't need to invalidate
            # partial parsing if an unused profile section has changed.
    -            fire_event(PartialParsingFailedBecauseProfileChange())
    +            fire_event(UnableToPartialParse(reason="profile has changed"))
                 valid = False
                 reparse_reason = ReparseReason.profile_changed
             if (
                 self.manifest.state_check.project_env_vars_hash
                 != manifest.state_check.project_env_vars_hash
             ):
    -            fire_event(PartialParsingProjectEnvVarsChanged())
    +            fire_event(
    +                UnableToPartialParse(reason="env vars used in dbt_project.yml have changed")
    +            )
                 valid = False
                 reparse_reason = ReparseReason.proj_env_vars_changed
             if (
                 self.manifest.state_check.profile_env_vars_hash
                 != manifest.state_check.profile_env_vars_hash
             ):
    -            fire_event(PartialParsingProfileEnvVarsChanged())
    +            fire_event(UnableToPartialParse(reason="env vars used in profiles.yml have changed"))
                 valid = False
                 reparse_reason = ReparseReason.prof_env_vars_changed
     
    @@ -610,7 +618,7 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]:
                 if k not in manifest.state_check.project_hashes
             }
             if missing_keys:
    -            fire_event(PartialParsingFailedBecauseNewProjectDependency())
    +            fire_event(UnableToPartialParse(reason="a project dependency has been added"))
                 valid = False
                 reparse_reason = ReparseReason.deps_changed
     
    @@ -618,7 +626,7 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]:
                 if key in manifest.state_check.project_hashes:
                     old_value = manifest.state_check.project_hashes[key]
                     if new_value != old_value:
    -                    fire_event(PartialParsingFailedBecauseHashChanged())
    +                    fire_event(UnableToPartialParse(reason="a project config has changed"))
                         valid = False
                         reparse_reason = ReparseReason.project_config_changed
             return valid, reparse_reason
    @@ -671,7 +679,9 @@ def read_manifest_for_partial_parse(self) -> Optional[Manifest]:
                     )
                     reparse_reason = ReparseReason.load_file_failure
             else:
    -            fire_event(PartialParseSaveFileNotFound())
    +            fire_event(
    +                UnableToPartialParse(reason="saved manifest not found. Starting full parse.")
    +            )
                 reparse_reason = ReparseReason.file_not_found
     
             # this event is only fired if a full reparse is needed
    @@ -710,16 +720,28 @@ def build_manifest_state_check(self):
             # arg vars, but since any changes to that file will cause state_check
             # to not pass, it doesn't matter.  If we move to more granular checking
             # of env_vars, that would need to change.
    +        # We are using the parsed cli_vars instead of config.args.vars, in order
    +        # to sort them and avoid reparsing because of ordering issues.
    +        stringified_cli_vars = pprint.pformat(config.cli_vars)
             vars_hash = FileHash.from_contents(
                 "\x00".join(
                     [
    -                    str(getattr(config.args, "vars", "{}") or "{}"),
    +                    stringified_cli_vars,
                         getattr(config.args, "profile", "") or "",
                         getattr(config.args, "target", "") or "",
                         __version__,
                     ]
                 )
             )
    +        fire_event(
    +            StateCheckVarsHash(
    +                checksum=vars_hash.checksum,
    +                vars=stringified_cli_vars,
    +                profile=config.args.profile,
    +                target=config.args.target,
    +                version=__version__,
    +            )
    +        )
     
             # Create a FileHash of the env_vars in the project
             key_list = list(config.project_env_vars.keys())
    @@ -975,19 +997,20 @@ def invalid_target_fail_unless_test(
         target_kind: str,
         target_package: Optional[str] = None,
         disabled: Optional[bool] = None,
    +    should_warn_if_disabled: bool = True,
     ):
         if node.resource_type == NodeType.Test:
             if disabled:
    -            fire_event(
    -                InvalidDisabledTargetInTestNode(
    -                    resource_type_title=node.resource_type.title(),
    -                    unique_id=node.unique_id,
    -                    original_file_path=node.original_file_path,
    -                    target_kind=target_kind,
    -                    target_name=target_name,
    -                    target_package=target_package if target_package else "",
    -                )
    +            event = InvalidDisabledTargetInTestNode(
    +                resource_type_title=node.resource_type.title(),
    +                unique_id=node.unique_id,
    +                original_file_path=node.original_file_path,
    +                target_kind=target_kind,
    +                target_name=target_name,
    +                target_package=target_package if target_package else "",
                 )
    +
    +            fire_event(event, EventLevel.WARN if should_warn_if_disabled else None)
             else:
                 warn_or_error(
                     NodeNotFoundOrDisabled(
    @@ -1001,7 +1024,7 @@ def invalid_target_fail_unless_test(
                     )
                 )
         else:
    -        raise TargetNotFound(
    +        raise TargetNotFoundError(
                 node=node,
                 target_name=target_name,
                 target_kind=target_kind,
    @@ -1029,11 +1052,13 @@ def _check_resource_uniqueness(
     
             existing_node = names_resources.get(name)
             if existing_node is not None:
    -            raise dbt.exceptions.DuplicateResourceName(existing_node, node)
    +            raise dbt.exceptions.DuplicateResourceNameError(existing_node, node)
     
             existing_alias = alias_resources.get(full_node_name)
             if existing_alias is not None:
    -            raise AmbiguousAlias(node_1=existing_alias, node_2=node, duped_name=full_node_name)
    +            raise AmbiguousAliasError(
    +                node_1=existing_alias, node_2=node, duped_name=full_node_name
    +            )
     
             names_resources[name] = node
             alias_resources[full_node_name] = node
    @@ -1125,7 +1150,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur
             elif len(ref) == 2:
                 target_model_package, target_model_name = ref
             else:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Refs should always be 1 or 2 arguments - got {len(ref)}"
                 )
     
    @@ -1146,6 +1171,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur
                     target_kind="node",
                     target_package=target_model_package,
                     disabled=(isinstance(target_model, Disabled)),
    +                should_warn_if_disabled=False,
                 )
     
                 continue
    @@ -1168,7 +1194,7 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: M
             elif len(ref) == 2:
                 target_model_package, target_model_name = ref
             else:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Refs should always be 1 or 2 arguments - got {len(ref)}"
                 )
     
    @@ -1189,6 +1215,7 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: M
                     target_kind="node",
                     target_package=target_model_package,
                     disabled=(isinstance(target_model, Disabled)),
    +                should_warn_if_disabled=False,
                 )
                 continue
     
    @@ -1218,7 +1245,7 @@ def _process_metrics_for_node(
             elif len(metric) == 2:
                 target_metric_package, target_metric_name = metric
             else:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Metric references should always be 1 or 2 arguments - got {len(metric)}"
                 )
     
    @@ -1263,7 +1290,7 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif
             elif len(ref) == 2:
                 target_model_package, target_model_name = ref
             else:
    -            raise dbt.exceptions.InternalException(
    +            raise dbt.exceptions.DbtInternalError(
                     f"Refs should always be 1 or 2 arguments - got {len(ref)}"
                 )
     
    @@ -1284,6 +1311,7 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif
                     target_kind="node",
                     target_package=target_model_package,
                     disabled=(isinstance(target_model, Disabled)),
    +                should_warn_if_disabled=False,
                 )
                 continue
     
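
Taken together, the hunks in this file replace roughly a dozen single-purpose event classes with one parameterized UnableToPartialParse event; the reason string now carries what used to be encoded in the class name, and ad-hoc diagnostics go through the generic Note event at DEBUG level. A usage sketch with reason text taken from the hunks above (the checksum values are placeholders):

    from dbt.events.base_types import EventLevel
    from dbt.events.functions import fire_event
    from dbt.events.types import Note, UnableToPartialParse

    fire_event(UnableToPartialParse(reason="profile has changed"))
    fire_event(
        Note(msg="previous checksum: <old>, current checksum: <new>"),
        level=EventLevel.DEBUG,
    )
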
    diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py
    index 39bb18be714..710df10f145 100644
    --- a/core/dbt/parser/models.py
    +++ b/core/dbt/parser/models.py
    @@ -1,19 +1,10 @@
     from copy import deepcopy
     from dbt.context.context_config import ContextConfig
     from dbt.contracts.graph.nodes import ModelNode
    -import dbt.flags as flags
    +from dbt.events.base_types import EventLevel
    +from dbt.events.types import Note
     from dbt.events.functions import fire_event
    -from dbt.events.types import (
    -    StaticParserCausedJinjaRendering,
    -    UsingExperimentalParser,
    -    SampleFullJinjaRendering,
    -    StaticParserFallbackJinjaRendering,
    -    StaticParsingMacroOverrideDetected,
    -    StaticParserSuccess,
    -    StaticParserFailure,
    -    ExperimentalParserSuccess,
    -    ExperimentalParserFailure,
    -)
    +import dbt.flags as flags
     from dbt.node_types import NodeType, ModelLanguage
     from dbt.parser.base import SimpleSQLParser
     from dbt.parser.search import FileBlock
    @@ -30,11 +21,11 @@
     import ast
     from dbt.dataclass_schema import ValidationError
     from dbt.exceptions import (
    -    InvalidModelConfig,
    -    ParsingException,
    -    PythonLiteralEval,
    -    PythonParsingException,
    -    UndefinedMacroException,
    +    ModelConfigError,
    +    ParsingError,
    +    PythonLiteralEvalError,
    +    PythonParsingError,
    +    UndefinedMacroError,
     )
     
     dbt_function_key_words = set(["ref", "source", "config", "get"])
    @@ -66,13 +57,13 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
     
         def check_error(self, node):
             if self.num_model_def != 1:
    -            raise ParsingException(
    +            raise ParsingError(
                     f"dbt allows exactly one model defined per python file, found {self.num_model_def}",
                     node=node,
                 )
     
             if len(self.dbt_errors) != 0:
    -            raise ParsingException("\n".join(self.dbt_errors), node=node)
    +            raise ParsingError("\n".join(self.dbt_errors), node=node)
     
     
     class PythonParseVisitor(ast.NodeVisitor):
    @@ -96,7 +87,7 @@ def _safe_eval(self, node):
             try:
                 return ast.literal_eval(node)
             except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc:
    -            raise PythonLiteralEval(exc, node=self.dbt_node) from exc
    +            raise PythonLiteralEvalError(exc, node=self.dbt_node) from exc
     
         def _get_call_literals(self, node):
             # List of literals
    @@ -176,9 +167,9 @@ def verify_python_model_code(node):
                 node,
             )
             if rendered_python != node.raw_code:
    -            raise ParsingException("")
    -    except (UndefinedMacroException, ParsingException):
    -        raise ParsingException("No jinja in python model code is allowed", node=node)
    +            raise ParsingError("")
    +    except (UndefinedMacroError, ParsingError):
    +        raise ParsingError("No jinja in python model code is allowed", node=node)
     
     
     class ModelParser(SimpleSQLParser[ModelNode]):
    @@ -202,7 +193,7 @@ def parse_python_model(self, node, config, context):
             try:
                 tree = ast.parse(node.raw_code, filename=node.original_file_path)
             except SyntaxError as exc:
    -            raise PythonParsingException(exc, node=node) from exc
    +            raise PythonParsingError(exc, node=node) from exc
     
             # Only parse if AST tree has instructions in body
             if tree.body:
    @@ -219,12 +210,12 @@ def parse_python_model(self, node, config, context):
                     if func == "get":
                         num_args = len(args)
                         if num_args == 0:
    -                        raise ParsingException(
    +                        raise ParsingError(
                                 "dbt.config.get() requires at least one argument",
                                 node=node,
                             )
                         if num_args > 2:
    -                        raise ParsingException(
    +                        raise ParsingError(
                                 f"dbt.config.get() takes at most 2 arguments ({num_args} given)",
                                 node=node,
                             )
    @@ -255,13 +246,16 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:
     
                 except ValidationError as exc:
                     # we got a ValidationError - probably bad types in config()
    -                raise InvalidModelConfig(exc, node=node) from exc
    +                raise ModelConfigError(exc, node=node) from exc
                 return
     
             elif not flags.STATIC_PARSER:
                 # jinja rendering
                 super().render_update(node, config)
    -            fire_event(StaticParserCausedJinjaRendering(path=node.path))
    +            fire_event(
    +                Note(f"1605: jinja rendering because of STATIC_PARSER flag. file: {node.path}"),
    +                EventLevel.DEBUG,
    +            )
                 return
     
             # only sample for experimental parser correctness on normal runs,
    @@ -295,7 +289,10 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:
     
             # sample the experimental parser only during a normal run
             if exp_sample and not flags.USE_EXPERIMENTAL_PARSER:
    -            fire_event(UsingExperimentalParser(path=node.path))
    +            fire_event(
    +                Note(f"1610: conducting experimental parser sample on {node.path}"),
    +                EventLevel.DEBUG,
    +            )
                 experimental_sample = self.run_experimental_parser(node)
                 # if the experimental parser succeeded, make a full copy of model parser
                 # and populate _everything_ into it so it can be compared apples-to-apples
    @@ -325,7 +322,10 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:
                 # sampling rng here, but the effect would be the same since we would only roll
                 # it 40% of the time. So I've opted to keep all the rng code colocated above.
                 if stable_sample and not flags.USE_EXPERIMENTAL_PARSER:
    -                fire_event(SampleFullJinjaRendering(path=node.path))
    +                fire_event(
    +                    Note(f"1611: conducting full jinja rendering sample on {node.path}"),
    +                    EventLevel.DEBUG,
    +                )
                     # if this will _never_ mutate anything `self` we could avoid these deep copies,
                     # but we can't really guarantee that going forward.
                     model_parser_copy = self.partial_deepcopy()
    @@ -360,7 +360,9 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None:
             else:
                 # jinja rendering
                 super().render_update(node, config)
    -            fire_event(StaticParserFallbackJinjaRendering(path=node.path))
    +            fire_event(
    +                Note(f"1602: parser fallback to jinja rendering on {node.path}"), EventLevel.DEBUG
    +            )
     
                 # if sampling, add the correct messages for tracking
                 if exp_sample and isinstance(experimental_sample, str):
    @@ -396,19 +398,26 @@ def run_static_parser(self, node: ModelNode) -> Optional[Union[str, Dict[str, Li
                 # this log line is used for integration testing. If you change
             # the code at the beginning of the line, change the tests in
                 # test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
    -            fire_event(StaticParsingMacroOverrideDetected(path=node.path))
    +            fire_event(
    +                Note(
    +                    f"1601: detected macro override of ref/source/config in the scope of {node.path}"
    +                ),
    +                EventLevel.DEBUG,
    +            )
                 return "has_banned_macro"
     
             # run the stable static parser and return the results
             try:
                 statically_parsed = py_extract_from_source(node.raw_code)
    -            fire_event(StaticParserSuccess(path=node.path))
    +            fire_event(
    +                Note(f"1699: static parser successfully parsed {node.path}"), EventLevel.DEBUG
    +            )
                 return _shift_sources(statically_parsed)
             # if we want information on what features are barring the static
             # parser from reading model files, this is where we would add that
             # since that information is stored in the `ExtractionError`.
             except ExtractionError:
    -            fire_event(StaticParserFailure(path=node.path))
    +            fire_event(Note(f"1603: static parser failed on {node.path}"), EventLevel.DEBUG)
                 return "cannot_parse"
     
         def run_experimental_parser(
    @@ -419,7 +428,12 @@ def run_experimental_parser(
                 # this log line is used for integration testing. If you change
             # the code at the beginning of the line, change the tests in
                 # test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
    -            fire_event(StaticParsingMacroOverrideDetected(path=node.path))
    +            fire_event(
    +                Note(
    +                    f"1601: detected macro override of ref/source/config in the scope of {node.path}"
    +                ),
    +                EventLevel.DEBUG,
    +            )
                 return "has_banned_macro"
     
             # run the experimental parser and return the results
    @@ -428,13 +442,16 @@ def run_experimental_parser(
                 # experimental features. Change `py_extract_from_source` to the new
                 # experimental call when we add additional features.
                 experimentally_parsed = py_extract_from_source(node.raw_code)
    -            fire_event(ExperimentalParserSuccess(path=node.path))
    +            fire_event(
    +                Note(f"1698: experimental parser successfully parsed {node.path}"),
    +                EventLevel.DEBUG,
    +            )
                 return _shift_sources(experimentally_parsed)
             # if we want information on what features are barring the experimental
             # parser from reading model files, this is where we would add that
             # since that information is stored in the `ExtractionError`.
             except ExtractionError:
    -            fire_event(ExperimentalParserFailure(path=node.path))
    +            fire_event(Note(f"1604: experimental parser failed on {node.path}"), EventLevel.DEBUG)
                 return "cannot_parse"
     
         # checks for banned macros
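
The numeric prefixes (1601 through 1699) on these Note messages are load-bearing: as the comments above note, the integration tests match on the beginning of the log line. A sketch of the convention (log_static_parser_result is a hypothetical helper, not part of the diff):

    from dbt.events.base_types import EventLevel
    from dbt.events.functions import fire_event
    from dbt.events.types import Note

    def log_static_parser_result(path: str, ok: bool) -> None:
        # 1699 = static parser success, 1603 = static parser failure,
        # matching the message codes used in run_static_parser above.
        code, verb = ("1699", "successfully parsed") if ok else ("1603", "failed on")
        fire_event(Note(msg=f"{code}: static parser {verb} {path}"), EventLevel.DEBUG)
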
    diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py
    index 63ef33429c4..d6afe223278 100644
    --- a/core/dbt/parser/partial.py
    +++ b/core/dbt/parser/partial.py
    @@ -8,18 +8,10 @@
         parse_file_type_to_parser,
     )
     from dbt.events.functions import fire_event
    +from dbt.events.base_types import EventLevel
     from dbt.events.types import (
         PartialParsingEnabled,
    -    PartialParsingAddedFile,
    -    PartialParsingDeletedFile,
    -    PartialParsingUpdatedFile,
    -    PartialParsingNodeMissingInSourceFile,
    -    PartialParsingMissingNodes,
    -    PartialParsingChildMapMissingUniqueID,
    -    PartialParsingUpdateSchemaFile,
    -    PartialParsingDeletedSource,
    -    PartialParsingDeletedExposure,
    -    PartialParsingDeletedMetric,
    +    PartialParsingFile,
     )
     from dbt.constants import DEFAULT_ENV_PLACEHOLDER
     from dbt.node_types import NodeType
    @@ -164,7 +156,11 @@ def build_file_diff(self):
                 self.macro_child_map = self.saved_manifest.build_macro_child_map()
             deleted = len(deleted) + len(deleted_schema_files)
             changed = len(changed) + len(changed_schema_files)
    -        fire_event(PartialParsingEnabled(deleted=deleted, added=len(added), changed=changed))
    +        event = PartialParsingEnabled(deleted=deleted, added=len(added), changed=changed)
    +        if os.environ.get("DBT_PP_TEST"):
    +            fire_event(event, level=EventLevel.INFO)
    +        else:
    +            fire_event(event)
             self.file_diff = file_diff
     
         # generate the list of files that need parsing
    @@ -234,7 +230,7 @@ def add_to_saved(self, file_id):
             self.saved_files[file_id] = source_file
             # update pp_files to parse
             self.add_to_pp_files(source_file)
    -        fire_event(PartialParsingAddedFile(file_id=file_id))
    +        fire_event(PartialParsingFile(operation="added", file_id=file_id))
     
         def handle_added_schema_file(self, source_file):
             source_file.pp_dict = source_file.dict_from_yaml.copy()
    @@ -282,7 +278,7 @@ def delete_from_saved(self, file_id):
             if saved_source_file.parse_file_type == ParseFileType.Documentation:
                 self.delete_doc_node(saved_source_file)
     
    -        fire_event(PartialParsingDeletedFile(file_id=file_id))
    +        fire_event(PartialParsingFile(operation="deleted", file_id=file_id))
     
         # Updates for non-schema files
         def update_in_saved(self, file_id):
    @@ -297,7 +293,7 @@ def update_in_saved(self, file_id):
                 self.update_doc_in_saved(new_source_file, old_source_file)
             else:
                 raise Exception(f"Invalid parse_file_type in source_file {file_id}")
    -        fire_event(PartialParsingUpdatedFile(file_id=file_id))
    +        fire_event(PartialParsingFile(operation="updated", file_id=file_id))
     
         # Models, seeds, snapshots: patches and tests
         # analyses: patches, no tests
    @@ -312,10 +308,6 @@ def update_mssat_in_saved(self, new_source_file, old_source_file):
             unique_ids = []
             if old_source_file.nodes:
                 unique_ids = old_source_file.nodes
    -        else:
    -            # It's not clear when this would actually happen.
    -            # Logging in case there are other associated errors.
    -            fire_event(PartialParsingNodeMissingInSourceFile(file_id=old_source_file.file_id))
     
             # replace source_file in saved and add to parsing list
             file_id = new_source_file.file_id
    @@ -386,7 +378,6 @@ def remove_mssat_file(self, source_file):
             # nodes [unique_ids] -- SQL files
             # There should always be a node for a SQL file
             if not source_file.nodes:
    -            fire_event(PartialParsingMissingNodes(file_id=source_file.file_id))
                 return
             # There is generally only 1 node for SQL files, except for macros and snapshots
             for unique_id in source_file.nodes:
    @@ -398,8 +389,6 @@ def schedule_referencing_nodes_for_parsing(self, unique_id):
             # Look at "children", i.e. nodes that reference this node
             if unique_id in self.saved_manifest.child_map:
                 self.schedule_nodes_for_parsing(self.saved_manifest.child_map[unique_id])
    -        else:
    -            fire_event(PartialParsingChildMapMissingUniqueID(unique_id=unique_id))
     
         def schedule_nodes_for_parsing(self, unique_ids):
             for unique_id in unique_ids:
    @@ -611,7 +600,7 @@ def change_schema_file(self, file_id):
             # schedule parsing
             self.add_to_pp_files(saved_schema_file)
             # schema_file pp_dict should have been generated already
    -        fire_event(PartialParsingUpdateSchemaFile(file_id=file_id))
    +        fire_event(PartialParsingFile(operation="updated", file_id=file_id))
     
         # Delete schema files -- a variation on change_schema_file
         def delete_schema_file(self, file_id):
    @@ -883,7 +872,6 @@ def delete_schema_source(self, schema_file, source_dict):
                         self.deleted_manifest.sources[unique_id] = source
                         schema_file.sources.remove(unique_id)
                         self.schedule_referencing_nodes_for_parsing(unique_id)
    -                    fire_event(PartialParsingDeletedSource(unique_id=unique_id))
     
         def delete_schema_macro_patch(self, schema_file, macro):
             # This is just macro patches that need to be reapplied
    @@ -912,7 +900,6 @@ def delete_schema_exposure(self, schema_file, exposure_dict):
                             unique_id
                         )
                         schema_file.exposures.remove(unique_id)
    -                    fire_event(PartialParsingDeletedExposure(unique_id=unique_id))
                 elif unique_id in self.saved_manifest.disabled:
                     self.delete_disabled(unique_id, schema_file.file_id)
     
    @@ -931,7 +918,6 @@ def delete_schema_metric(self, schema_file, metric_dict):
                             unique_id
                         )
                         schema_file.metrics.remove(unique_id)
    -                    fire_event(PartialParsingDeletedMetric(unique_id=unique_id))
                 elif unique_id in self.saved_manifest.disabled:
                     self.delete_disabled(unique_id, schema_file.file_id)
     
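
The DBT_PP_TEST toggle keeps the partial-parsing summary at its default level for normal runs but promotes it to INFO when the test suite sets the environment variable. In isolation, the gate looks like this (the counts are placeholders):

    import os

    from dbt.events.base_types import EventLevel
    from dbt.events.functions import fire_event
    from dbt.events.types import PartialParsingEnabled

    event = PartialParsingEnabled(deleted=1, added=2, changed=0)
    if os.environ.get("DBT_PP_TEST"):
        fire_event(event, level=EventLevel.INFO)  # tests want it visible
    else:
        fire_event(event)  # normal runs keep the default level
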
    diff --git a/core/dbt/parser/read_files.py b/core/dbt/parser/read_files.py
    index ccb6b1b0790..531e5f39560 100644
    --- a/core/dbt/parser/read_files.py
    +++ b/core/dbt/parser/read_files.py
    @@ -12,7 +12,7 @@
     )
     
     from dbt.parser.schemas import yaml_from_file, schema_file_keys, check_format_version
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
     from dbt.parser.search import filesystem_search
     from typing import Optional
     
    @@ -75,21 +75,21 @@ def validate_yaml(file_path, dct):
                         f"The schema file at {file_path} is "
                         f"invalid because the value of '{key}' is not a list"
                     )
    -                raise ParsingException(msg)
    +                raise ParsingError(msg)
                 for element in dct[key]:
                     if not isinstance(element, dict):
                         msg = (
                             f"The schema file at {file_path} is "
                             f"invalid because a list element for '{key}' is not a dictionary"
                         )
    -                    raise ParsingException(msg)
    +                    raise ParsingError(msg)
                     if "name" not in element:
                         msg = (
                             f"The schema file at {file_path} is "
                             f"invalid because a list element for '{key}' does not have a "
                             "name attribute."
                         )
    -                    raise ParsingException(msg)
    +                    raise ParsingError(msg)
     
     
     # Special processing for big seed files
    diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py
    index b5fd8558889..482eb5b6e35 100644
    --- a/core/dbt/parser/schemas.py
    +++ b/core/dbt/parser/schemas.py
    @@ -50,25 +50,25 @@
         UnparsedSourceDefinition,
     )
     from dbt.exceptions import (
    -    CompilationException,
    -    DuplicateMacroPatchName,
    -    DuplicatePatchPath,
    -    DuplicateSourcePatchName,
    -    JSONValidationException,
    -    InternalException,
    -    InvalidSchemaConfig,
    -    InvalidTestConfig,
    -    ParsingException,
    -    PropertyYMLInvalidTag,
    -    PropertyYMLMissingVersion,
    -    PropertyYMLVersionNotInt,
    -    ValidationException,
    -    YamlLoadFailure,
    -    YamlParseDictFailure,
    -    YamlParseListFailure,
    +    CompilationError,
    +    DuplicateMacroPatchNameError,
    +    DuplicatePatchPathError,
    +    DuplicateSourcePatchNameError,
    +    JSONValidationError,
    +    DbtInternalError,
    +    SchemaConfigError,
    +    TestConfigError,
    +    ParsingError,
    +    PropertyYMLInvalidTagError,
    +    PropertyYMLMissingVersionError,
    +    PropertyYMLVersionNotIntError,
    +    DbtValidationError,
    +    YamlLoadError,
    +    YamlParseDictError,
    +    YamlParseListError,
     )
     from dbt.events.functions import warn_or_error
    -from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound
    +from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroNotFoundForPatch
     from dbt.node_types import NodeType
     from dbt.parser.base import SimpleParser
     from dbt.parser.search import FileBlock
    @@ -99,11 +99,13 @@
     
     def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]:
         """If loading the yaml fails, raise an exception."""
    -    path = source_file.path.relative_path
         try:
    -        return load_yaml_text(source_file.contents, source_file.path)
    -    except ValidationException as e:
    -        raise YamlLoadFailure(source_file.project_name, path, e)
    +        # source_file.contents can sometimes be None
    +        return load_yaml_text(source_file.contents or "", source_file.path)
    +    except DbtValidationError as e:
    +        raise YamlLoadError(
    +            project_name=source_file.project_name, path=source_file.path.relative_path, exc=e
    +        )
     
     
     class ParserRef:
    @@ -255,7 +257,7 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List
                     original_file_path=target.original_file_path,
                     raw_code=raw_code,
                 )
    -            raise InvalidTestConfig(exc, node)
    +            raise TestConfigError(exc, node)
     
         # lots of time spent in this method
         def _parse_generic_test(
    @@ -278,20 +280,20 @@ def _parse_generic_test(
                     self.store_env_vars(target, schema_file_id, self.schema_yaml_vars.env_vars)
                     self.schema_yaml_vars.env_vars = {}
     
    -        except ParsingException as exc:
    +        except ParsingError as exc:
                 context = _trimmed(str(target))
                 msg = "Invalid test config given in {}:\n\t{}\n\t@: {}".format(
                     target.original_file_path, exc.msg, context
                 )
    -            raise ParsingException(msg) from exc
    +            raise ParsingError(msg) from exc
     
    -        except CompilationException as exc:
    +        except CompilationError as exc:
                 context = _trimmed(str(target))
                 msg = (
                     "Invalid generic test configuration given in "
                     f"{target.original_file_path}: \n{exc.msg}\n\t@: {context}"
                 )
    -            raise CompilationException(msg) from exc
    +            raise CompilationError(msg) from exc
     
             original_name = os.path.basename(target.original_file_path)
             compiled_path = get_pseudo_test_path(builder.compiled_name, original_name)
    @@ -397,7 +399,7 @@ def render_test_update(self, node, config, builder, schema_file_id):
                     # env_vars should have been updated in the context env_var method
                 except ValidationError as exc:
                     # we got a ValidationError - probably bad types in config()
    -                raise InvalidSchemaConfig(exc, node=node) from exc
    +                raise SchemaConfigError(exc, node=node) from exc
     
         def parse_node(self, block: GenericTestBlock) -> GenericTestNode:
             """In schema parsing, we rewrite most of the part of parse_node that
    @@ -537,16 +539,16 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None:
     
     def check_format_version(file_path, yaml_dct) -> None:
         if "version" not in yaml_dct:
    -        raise PropertyYMLMissingVersion(file_path)
    +        raise PropertyYMLMissingVersionError(file_path)
     
         version = yaml_dct["version"]
         # if it's not an integer, the version is malformed, or not
         # set. Either way, only 'version: 2' is supported.
         if not isinstance(version, int):
    -        raise PropertyYMLVersionNotInt(file_path, version)
    +        raise PropertyYMLVersionNotIntError(file_path, version)
     
         if version != 2:
    -        raise PropertyYMLInvalidTag(file_path, version)
    +        raise PropertyYMLInvalidTagError(file_path, version)
     
     
     Parsed = TypeVar("Parsed", UnpatchedSourceDefinition, ParsedNodePatch, ParsedMacroPatch)
    @@ -594,7 +596,7 @@ def root_project(self):
         def get_key_dicts(self) -> Iterable[Dict[str, Any]]:
             data = self.yaml.data.get(self.key, [])
             if not isinstance(data, list):
    -            raise ParsingException(
    +            raise ParsingError(
                     "{} must be a list, got {} instead: ({})".format(
                         self.key, type(data), _trimmed(str(data))
                     )
    @@ -607,12 +609,10 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]:
                 # check that entry is a dict and that all dict values
                 # are strings
                 if coerce_dict_str(entry) is None:
    -                raise YamlParseListFailure(
    -                    path, self.key, data, "expected a dict with string keys"
    -                )
    +                raise YamlParseListError(path, self.key, data, "expected a dict with string keys")
     
                 if "name" not in entry:
    -                raise ParsingException("Entry did not contain a name")
    +                raise ParsingError("Entry did not contain a name")
     
                 # Render the data (except for tests and descriptions).
                 # See the SchemaYamlRenderer
    @@ -631,8 +631,8 @@ def render_entry(self, dct):
             try:
                 # This does a deep_map which will fail if there are circular references
                 dct = self.renderer.render_data(dct)
    -        except ParsingException as exc:
    -            raise ParsingException(
    +        except ParsingError as exc:
    +            raise ParsingError(
                     f"Failed to render {self.yaml.file.path.original_file_path} from "
                     f"project {self.project.project_name}: {exc}"
                 ) from exc
    @@ -655,8 +655,8 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T:
             try:
                 cls.validate(data)
                 return cls.from_dict(data)
    -        except (ValidationError, JSONValidationException) as exc:
    -            raise YamlParseDictFailure(path, self.key, data, exc)
    +        except (ValidationError, JSONValidationError) as exc:
    +            raise YamlParseDictError(path, self.key, data, exc)
     
         # The other parse method returns TestBlocks. This one doesn't.
         # This takes the yaml dictionaries in 'sources' keys and uses them
    @@ -677,7 +677,7 @@ def parse(self) -> List[TestBlock]:
                     # source patches must be unique
                     key = (patch.overrides, patch.name)
                     if key in self.manifest.source_patches:
    -                    raise DuplicateSourcePatchName(patch, self.manifest.source_patches[key])
    +                    raise DuplicateSourcePatchNameError(patch, self.manifest.source_patches[key])
                     self.manifest.source_patches[key] = patch
                     source_file.source_patches.append(key)
                 else:
    @@ -780,8 +780,8 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]:
                         self.normalize_meta_attribute(data, path)
                         self.normalize_docs_attribute(data, path)
                     node = self._target_type().from_dict(data)
    -            except (ValidationError, JSONValidationException) as exc:
    -                raise YamlParseDictFailure(path, self.key, data, exc)
    +            except (ValidationError, JSONValidationError) as exc:
    +                raise YamlParseDictError(path, self.key, data, exc)
                 else:
                     yield node
     
    @@ -790,7 +790,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]:
         def normalize_attribute(self, data, path, attribute):
             if attribute in data:
                 if "config" in data and attribute in data["config"]:
    -                raise ParsingException(
    +                raise ParsingError(
                         f"""
                         In {path}: found {attribute} dictionary in 'config' dictionary and as top-level key.
                         Remove the top-level key and define it under 'config' dictionary only.
    @@ -858,7 +858,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None:
             elif patch.yaml_key == "analyses":
                 unique_id = self.manifest.analysis_lookup.get_unique_id(patch.name, None)
             else:
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"Unexpected yaml_key {patch.yaml_key} for patch in "
                     f"file {source_file.path.original_file_path}"
                 )
    @@ -877,7 +877,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None:
                             "unique id cannot be enabled in the schema file. They must be enabled "
                             "in `dbt_project.yml` or in the sql files."
                         )
    -                    raise ParsingException(msg)
    +                    raise ParsingError(msg)
     
                     # all nodes in the disabled dict have the same unique_id so just grab the first one
                    # to append with the unique id
    @@ -905,7 +905,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None:
             if node:
                 if node.patch_path:
                     package_name, existing_file_path = node.patch_path.split("://")
    -                raise DuplicatePatchPath(patch, existing_file_path)
    +                raise DuplicatePatchPathError(patch, existing_file_path)
     
                 source_file.append_patch(patch.yaml_key, node.unique_id)
                 # re-calculate the node config with the patch config.  Always do this
    @@ -957,11 +957,11 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef)
             unique_id = f"macro.{patch.package_name}.{patch.name}"
             macro = self.manifest.macros.get(unique_id)
             if not macro:
    -            warn_or_error(MacroPatchNotFound(patch_name=patch.name))
    +            warn_or_error(MacroNotFoundForPatch(patch_name=patch.name))
                 return
             if macro.patch_path:
                 package_name, existing_file_path = macro.patch_path.split("://")
    -            raise DuplicateMacroPatchName(patch, existing_file_path)
    +            raise DuplicateMacroPatchNameError(patch, existing_file_path)
             source_file.macro_patches[patch.name] = unique_id
             macro.patch(patch)
     
    @@ -997,7 +997,7 @@ def parse_exposure(self, unparsed: UnparsedExposure):
             )
     
             if not isinstance(config, ExposureConfig):
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig"
                 )
     
    @@ -1063,8 +1063,8 @@ def parse(self):
                 try:
                     UnparsedExposure.validate(data)
                     unparsed = UnparsedExposure.from_dict(data)
    -            except (ValidationError, JSONValidationException) as exc:
    -                raise YamlParseDictFailure(self.yaml.path, self.key, data, exc)
    +            except (ValidationError, JSONValidationError) as exc:
    +                raise YamlParseDictError(self.yaml.path, self.key, data, exc)
     
                 self.parse_exposure(unparsed)
     
    @@ -1100,7 +1100,7 @@ def parse_metric(self, unparsed: UnparsedMetric):
             )
     
             if not isinstance(config, MetricConfig):
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"Calculated a {type(config)} for a metric, but expected a MetricConfig"
                 )
     
    @@ -1180,6 +1180,6 @@ def parse(self):
                     UnparsedMetric.validate(data)
                     unparsed = UnparsedMetric.from_dict(data)
     
    -            except (ValidationError, JSONValidationException) as exc:
    -                raise YamlParseDictFailure(self.yaml.path, self.key, data, exc)
    +            except (ValidationError, JSONValidationError) as exc:
    +                raise YamlParseDictError(self.yaml.path, self.key, data, exc)
                 self.parse_metric(unparsed)
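
The yaml_from_file change above fixes a real crash: source_file.contents can be None for an empty file, and coalescing it to "" lets the loader parse an empty document instead of raising on a NoneType. Approximately (assuming load_yaml_text comes from dbt.clients.yaml_helper, as schemas.py imports it):

    from dbt.clients.yaml_helper import load_yaml_text
    from dbt.exceptions import DbtValidationError, YamlLoadError

    def yaml_from_file(source_file):
        try:
            # contents may be None for an empty schema file
            return load_yaml_text(source_file.contents or "", source_file.path)
        except DbtValidationError as e:
            raise YamlLoadError(
                project_name=source_file.project_name,
                path=source_file.path.relative_path,
                exc=e,
            )
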
    diff --git a/core/dbt/parser/search.py b/core/dbt/parser/search.py
    index f8ccc974be4..75e7fa6636c 100644
    --- a/core/dbt/parser/search.py
    +++ b/core/dbt/parser/search.py
    @@ -7,7 +7,7 @@
     from dbt.clients.system import find_matching
     from dbt.config import Project
     from dbt.contracts.files import FilePath, AnySourceFile
    -from dbt.exceptions import ParsingException, InternalException
    +from dbt.exceptions import ParsingError, DbtInternalError
     
     
     # What's the point of wrapping a SourceFile with this class?
    @@ -73,7 +73,7 @@ def filesystem_search(
         file_path_list = []
         for result in find_matching(root, relative_dirs, ext, ignore_spec):
             if "searched_path" not in result or "relative_path" not in result:
    -            raise InternalException("Invalid result from find_matching: {}".format(result))
    +            raise DbtInternalError("Invalid result from find_matching: {}".format(result))
             file_match = FilePath(
                 searched_path=result["searched_path"],
                 relative_path=result["relative_path"],
    @@ -113,7 +113,7 @@ def extract_blocks(self, source_file: FileBlock) -> Iterable[BlockTag]:
                     assert isinstance(block, BlockTag)
                     yield block
     
    -        except ParsingException as exc:
    +        except ParsingError as exc:
                 if exc.node is None:
                     exc.add_node(source_file)
                 raise
    diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py
    index dffc7d90641..72aec4ee976 100644
    --- a/core/dbt/parser/snapshots.py
    +++ b/core/dbt/parser/snapshots.py
    @@ -4,7 +4,7 @@
     from dbt.dataclass_schema import ValidationError
     
     from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode
    -from dbt.exceptions import InvalidSnapshopConfig
    +from dbt.exceptions import SnapshopConfigError
     from dbt.node_types import NodeType
     from dbt.parser.base import SQLParser
     from dbt.parser.search import BlockContents, BlockSearcher, FileBlock
    @@ -68,7 +68,7 @@ def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode:
                 self.set_snapshot_attributes(parsed_node)
                 return parsed_node
             except ValidationError as exc:
    -            raise InvalidSnapshopConfig(exc, node)
    +            raise SnapshopConfigError(exc, node)
     
         def parse_file(self, file_block: FileBlock) -> None:
             blocks = BlockSearcher(
    diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py
    index cc9acea98c3..098ebde09c6 100644
    --- a/core/dbt/parser/sources.py
    +++ b/core/dbt/parser/sources.py
    @@ -26,7 +26,7 @@
     )
     from dbt.events.functions import warn_or_error
     from dbt.events.types import UnusedTables
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.node_types import NodeType
     
     from dbt.parser.schemas import SchemaParser, ParserRef
    @@ -150,7 +150,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition:
             )
     
             if not isinstance(config, SourceConfig):
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"Calculated a {type(config)} for a source, but expected a SourceConfig"
                 )
     
    diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py
    index 82d09c12d6b..98e28aadc19 100644
    --- a/core/dbt/parser/sql.py
    +++ b/core/dbt/parser/sql.py
    @@ -5,7 +5,7 @@
     from dbt.contracts.graph.manifest import SourceFile
     from dbt.contracts.graph.nodes import SqlNode, Macro
     from dbt.contracts.graph.unparsed import UnparsedMacro
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.node_types import NodeType
     from dbt.parser.base import SimpleSQLParser
     from dbt.parser.macros import MacroParser
    @@ -35,7 +35,7 @@ def resource_type(self) -> NodeType:
         def get_compiled_path(block: FileBlock):
             # we do it this way to make mypy happy
             if not isinstance(block, SqlBlock):
    -            raise InternalException(
    +            raise DbtInternalError(
                     "While parsing SQL operation, got an actual file block instead of "
                     "an SQL block: {}".format(block)
                 )
    diff --git a/core/dbt/semver.py b/core/dbt/semver.py
    index 7f8913c3600..24f00b333a1 100644
    --- a/core/dbt/semver.py
    +++ b/core/dbt/semver.py
    @@ -5,7 +5,7 @@
     
     from packaging import version as packaging_version
     
    -from dbt.exceptions import VersionsNotCompatibleException
    +from dbt.exceptions import VersionsNotCompatibleError
     import dbt.utils
     
     from dbt.dataclass_schema import dbtClassMixin, StrEnum
    @@ -94,7 +94,7 @@ def from_version_string(cls, version_string):
             match = _VERSION_REGEX.match(version_string)
     
             if not match:
    -            raise dbt.exceptions.SemverException(
    +            raise dbt.exceptions.SemverError(
                     f'"{version_string}" is not a valid semantic version.'
                 )
     
    @@ -222,7 +222,7 @@ def _try_combine_exact(self, a, b):
             if a.compare(b) == 0:
                 return a
             else:
    -            raise VersionsNotCompatibleException()
    +            raise VersionsNotCompatibleError()
     
         def _try_combine_lower_bound_with_exact(self, lower, exact):
             comparison = lower.compare(exact)
    @@ -230,7 +230,7 @@ def _try_combine_lower_bound_with_exact(self, lower, exact):
             if comparison < 0 or (comparison == 0 and lower.matcher == Matchers.GREATER_THAN_OR_EQUAL):
                 return exact
     
    -        raise VersionsNotCompatibleException()
    +        raise VersionsNotCompatibleError()
     
         def _try_combine_lower_bound(self, a, b):
             if b.is_unbounded:
    @@ -258,7 +258,7 @@ def _try_combine_upper_bound_with_exact(self, upper, exact):
             if comparison > 0 or (comparison == 0 and upper.matcher == Matchers.LESS_THAN_OR_EQUAL):
                 return exact
     
    -        raise VersionsNotCompatibleException()
    +        raise VersionsNotCompatibleError()
     
         def _try_combine_upper_bound(self, a, b):
             if b.is_unbounded:
    @@ -291,7 +291,7 @@ def reduce(self, other):
                 end = self._try_combine_upper_bound(self.end, other.end)
     
             if start.compare(end) > 0:
    -            raise VersionsNotCompatibleException()
    +            raise VersionsNotCompatibleError()
     
             return VersionRange(start=start, end=end)
     
    @@ -379,8 +379,8 @@ def reduce_versions(*args):
     
             for version_specifier in version_specifiers:
                 to_return = to_return.reduce(version_specifier.to_range())
    -    except VersionsNotCompatibleException:
    -        raise VersionsNotCompatibleException(
    +    except VersionsNotCompatibleError:
    +        raise VersionsNotCompatibleError(
                 "Could not find a satisfactory version from options: {}".format([str(a) for a in args])
             )
     
    @@ -394,7 +394,7 @@ def versions_compatible(*args):
         try:
             reduce_versions(*args)
             return True
    -    except VersionsNotCompatibleException:
    +    except VersionsNotCompatibleError:
             return False
     
     
    diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py
    index 1a948b48b2e..460152b1f20 100644
    --- a/core/dbt/task/base.py
    +++ b/core/dbt/task/base.py
    @@ -4,6 +4,7 @@
     import traceback
     from abc import ABCMeta, abstractmethod
     from typing import Type, Union, Dict, Any, Optional
    +from datetime import datetime
     
     from dbt import tracking
     from dbt import flags
    @@ -16,24 +17,18 @@
         RunningStatus,
     )
     from dbt.exceptions import (
    -    NotImplementedException,
    -    CompilationException,
    -    RuntimeException,
    -    InternalException,
    +    NotImplementedError,
    +    CompilationError,
    +    DbtRuntimeError,
    +    DbtInternalError,
     )
     from dbt.logger import log_manager
     from dbt.events.functions import fire_event
     from dbt.events.types import (
    -    DbtProjectError,
    -    DbtProjectErrorException,
    -    DbtProfileError,
    -    DbtProfileErrorException,
    -    ProfileListTitle,
    -    ListSingleProfile,
    -    NoDefinedProfiles,
    -    ProfileHelpMessage,
    +    LogDbtProjectError,
    +    LogDbtProfileError,
         CatchableExceptionOnRun,
    -    InternalExceptionOnRun,
    +    InternalErrorOnRun,
         GenericExceptionOnRun,
         NodeConnectionReleaseError,
         LogDebugStackTrace,
    @@ -102,33 +97,20 @@ def from_args(cls, args, *pargs, **kwargs):
                 # This is usually RuntimeConfig
                 config = cls.ConfigType.from_args(args)
             except dbt.exceptions.DbtProjectError as exc:
    -            fire_event(DbtProjectError())
    -            fire_event(DbtProjectErrorException(exc=str(exc)))
    +            fire_event(LogDbtProjectError(exc=str(exc)))
     
                 tracking.track_invalid_invocation(args=args, result_type=exc.result_type)
    -            raise dbt.exceptions.RuntimeException("Could not run dbt") from exc
    +            raise dbt.exceptions.DbtRuntimeError("Could not run dbt") from exc
             except dbt.exceptions.DbtProfileError as exc:
    -            fire_event(DbtProfileError())
    -            fire_event(DbtProfileErrorException(exc=str(exc)))
    -
    -            all_profiles = read_profiles(flags.PROFILES_DIR).keys()
    -
    -            if len(all_profiles) > 0:
    -                fire_event(ProfileListTitle())
    -                for profile in all_profiles:
    -                    fire_event(ListSingleProfile(profile=profile))
    -            else:
    -                fire_event(NoDefinedProfiles())
    -
    -            fire_event(ProfileHelpMessage())
    -
    +            all_profile_names = list(read_profiles(flags.PROFILES_DIR).keys())
    +            fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names))
                 tracking.track_invalid_invocation(args=args, result_type=exc.result_type)
    -            raise dbt.exceptions.RuntimeException("Could not run dbt") from exc
    +            raise dbt.exceptions.DbtRuntimeError("Could not run dbt") from exc
             return cls(args, config, *pargs, **kwargs)
     
         @abstractmethod
         def run(self):
    -        raise dbt.exceptions.NotImplementedException("Not Implemented")
    +        raise dbt.exceptions.NotImplementedError("Not Implemented")
     
         def interpret_results(self, results):
             return True
    @@ -142,7 +124,7 @@ def get_nearest_project_dir(project_dir: Optional[str]) -> str:
             if os.path.exists(project_file):
                 return project_dir
             else:
    -            raise dbt.exceptions.RuntimeException(
    +            raise dbt.exceptions.DbtRuntimeError(
                     "fatal: Invalid --project-dir flag. Not a dbt project. "
                     "Missing dbt_project.yml file"
                 )
    @@ -156,7 +138,7 @@ def get_nearest_project_dir(project_dir: Optional[str]) -> str:
                 return cwd
             cwd = os.path.dirname(cwd)
     
    -    raise dbt.exceptions.RuntimeException(
    +    raise dbt.exceptions.DbtRuntimeError(
             "fatal: Not a dbt project (or any of the parent directories). "
             "Missing dbt_project.yml file"
         )
    @@ -181,7 +163,7 @@ def __init__(self, args, config, manifest: Optional[Manifest] = None):
     
         def compile_manifest(self):
             if self.manifest is None:
    -            raise InternalException("compile_manifest called before manifest was loaded")
    +            raise DbtInternalError("compile_manifest called before manifest was loaded")
     
             start_compile_manifest = time.perf_counter()
     
    @@ -246,6 +228,9 @@ def run_with_hooks(self, manifest):
                 self.before_execute()
     
             result = self.safe_run(manifest)
    +        self.node.update_event_status(
    +            node_status=result.status, finished_at=datetime.utcnow().isoformat()
    +        )
     
             if not self.node.is_ephemeral_model:
                 self.after_execute(result)
    @@ -366,7 +351,7 @@ def _handle_catchable_exception(self, e, ctx):
             return str(e)
     
         def _handle_internal_exception(self, e, ctx):
    -        fire_event(InternalExceptionOnRun(build_path=self.node.build_path, exc=str(e)))
    +        fire_event(InternalErrorOnRun(build_path=self.node.build_path, exc=str(e)))
             return str(e)
     
         def _handle_generic_exception(self, e, ctx):
    @@ -382,10 +367,10 @@ def _handle_generic_exception(self, e, ctx):
             return str(e)
     
         def handle_exception(self, e, ctx):
    -        catchable_errors = (CompilationException, RuntimeException)
    +        catchable_errors = (CompilationError, DbtRuntimeError)
             if isinstance(e, catchable_errors):
                 error = self._handle_catchable_exception(e, ctx)
    -        elif isinstance(e, InternalException):
    +        elif isinstance(e, DbtInternalError):
                 error = self._handle_internal_exception(e, ctx)
             else:
                 error = self._handle_generic_exception(e, ctx)
    @@ -440,16 +425,16 @@ def _safe_release_connection(self):
             return None
     
         def before_execute(self):
    -        raise NotImplementedException()
    +        raise NotImplementedError()
     
         def execute(self, compiled_node, manifest):
    -        raise NotImplementedException()
    +        raise NotImplementedError()
     
         def run(self, compiled_node, manifest):
             return self.execute(compiled_node, manifest)
     
         def after_execute(self, result):
    -        raise NotImplementedException()
    +        raise NotImplementedError()
     
         def _skip_caused_by_ephemeral_failure(self):
             if self.skip_cause is None or self.skip_cause.node is None:
    @@ -475,7 +460,7 @@ def on_skip(self):
                     )
                     print_run_result_error(result=self.skip_cause, newline=False)
                     if self.skip_cause is None:  # mypy appeasement
    -                    raise InternalException(
    +                    raise DbtInternalError(
                             "Skip cause not set but skip was somehow caused by an ephemeral failure"
                         )
                     # set an error so dbt will exit with an error code
    @@ -486,6 +471,9 @@ def on_skip(self):
                         )
                     )
                 else:
    +                # 'skipped' nodes should not have a value for 'node_finished_at'
    +                # they do have 'node_started_at', which is set in GraphRunnableTask.call_runner
    +                self.node.update_event_status(node_status=RunStatus.Skipped)
                     fire_event(
                         SkippingDetails(
                             resource_type=self.node.resource_type,
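The base.py changes above keep the runner's three-way exception dispatch intact while renaming the classes it matches on. A minimal, self-contained sketch of that dispatch pattern (stand-in exception classes and string-returning handlers; the real runner fires structured events instead):

    class CompilationError(Exception): pass
    class DbtRuntimeError(Exception): pass
    class DbtInternalError(Exception): pass

    def handle_exception(e: Exception) -> str:
        # Catchable errors get node-aware handling; internal errors signal
        # dbt bugs; everything else falls through to the generic handler.
        catchable_errors = (CompilationError, DbtRuntimeError)
        if isinstance(e, catchable_errors):
            return f"catchable: {e}"   # stand-in for _handle_catchable_exception
        elif isinstance(e, DbtInternalError):
            return f"internal: {e}"    # stand-in for _handle_internal_exception
        return f"generic: {e}"         # stand-in for _handle_generic_exception

    assert handle_exception(DbtRuntimeError("boom")).startswith("catchable")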
    diff --git a/core/dbt/task/build.py b/core/dbt/task/build.py
    index aabc561bd7c..8a5dc39c9b7 100644
    --- a/core/dbt/task/build.py
    +++ b/core/dbt/task/build.py
    @@ -5,7 +5,7 @@
     
     from dbt.adapters.factory import get_adapter
     from dbt.contracts.results import NodeStatus
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.graph import ResourceTypeSelector
     from dbt.node_types import NodeType
     from dbt.task.test import TestSelector
    @@ -44,7 +44,7 @@ def resource_types(self):
     
         def get_node_selector(self) -> ResourceTypeSelector:
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get node selection")
    +            raise DbtInternalError("manifest and graph must be set to get node selection")
     
             resource_types = self.resource_types
     
    @@ -66,7 +66,7 @@ def get_runner_type(self, node):
     
         def compile_manifest(self):
             if self.manifest is None:
    -            raise InternalException("compile_manifest called before manifest was loaded")
    +            raise DbtInternalError("compile_manifest called before manifest was loaded")
             adapter = get_adapter(self.config)
             compiler = adapter.get_compiler()
             self.graph = compiler.compile(self.manifest, add_test_edges=True)
    diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py
    index 92eedcd97d3..684f5b6d854 100644
    --- a/core/dbt/task/compile.py
    +++ b/core/dbt/task/compile.py
    @@ -6,7 +6,7 @@
     
     from dbt.contracts.graph.manifest import WritableManifest
     from dbt.contracts.results import RunStatus, RunResult
    -from dbt.exceptions import InternalException, RuntimeException
    +from dbt.exceptions import DbtInternalError, DbtRuntimeError
     from dbt.graph import ResourceTypeSelector
     from dbt.events.functions import fire_event
     from dbt.events.types import CompileComplete
    @@ -44,7 +44,7 @@ def raise_on_first_error(self):
     
         def get_node_selector(self) -> ResourceTypeSelector:
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             return ResourceTypeSelector(
                 graph=self.graph,
                 manifest=self.manifest,
    @@ -64,12 +64,12 @@ def _get_deferred_manifest(self) -> Optional[WritableManifest]:
     
             state = self.previous_state
             if state is None:
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     "Received a --defer argument, but no value was provided to --state"
                 )
     
             if state.manifest is None:
    -            raise RuntimeException(f'Could not find manifest in --state path: "{self.args.state}"')
    +            raise DbtRuntimeError(f'Could not find manifest in --state path: "{self.args.state}"')
             return state.manifest
     
         def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
    @@ -77,13 +77,14 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
             if deferred_manifest is None:
                 return
             if self.manifest is None:
    -            raise InternalException(
    +            raise DbtInternalError(
                     "Expected to defer to manifest, but there is no runtime manifest to defer from!"
                 )
             self.manifest.merge_from_artifact(
                 adapter=adapter,
                 other=deferred_manifest,
                 selected=selected_uids,
    +            favor_state=bool(self.args.favor_state),
             )
             # TODO: is it wrong to write the manifest here? I think it's right...
             write_manifest(self.manifest, self.config.target_path)
    diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py
    index ab986f2b270..be8779b82f4 100644
    --- a/core/dbt/task/debug.py
    +++ b/core/dbt/task/debug.py
    @@ -5,7 +5,11 @@
     from typing import Optional, Dict, Any, List
     
     from dbt.events.functions import fire_event
    -from dbt.events.types import OpenCommand
    +from dbt.events.types import (
    +    OpenCommand,
    +    DebugCmdOut,
    +    DebugCmdResult,
    +)
     from dbt import flags
     import dbt.clients.system
     import dbt.exceptions
    @@ -98,25 +102,25 @@ def run(self):
                 return not self.any_failure
     
             version = get_installed_version().to_version_string(skip_matcher=True)
    -        print("dbt version: {}".format(version))
    -        print("python version: {}".format(sys.version.split()[0]))
    -        print("python path: {}".format(sys.executable))
    -        print("os info: {}".format(platform.platform()))
    -        print("Using profiles.yml file at {}".format(self.profile_path))
    -        print("Using dbt_project.yml file at {}".format(self.project_path))
    -        print("")
    +        fire_event(DebugCmdOut(msg="dbt version: {}".format(version)))
    +        fire_event(DebugCmdOut(msg="python version: {}".format(sys.version.split()[0])))
    +        fire_event(DebugCmdOut(msg="python path: {}".format(sys.executable)))
    +        fire_event(DebugCmdOut(msg="os info: {}".format(platform.platform())))
    +        fire_event(DebugCmdOut(msg="Using profiles.yml file at {}".format(self.profile_path)))
    +        fire_event(DebugCmdOut(msg="Using dbt_project.yml file at {}".format(self.project_path)))
             self.test_configuration()
             self.test_dependencies()
             self.test_connection()
     
             if self.any_failure:
    -            print(red(f"{(pluralize(len(self.messages), 'check'))} failed:"))
    +            fire_event(
    +                DebugCmdResult(msg=red(f"{(pluralize(len(self.messages), 'check'))} failed:"))
    +            )
             else:
    -            print(green("All checks passed!"))
    +            fire_event(DebugCmdResult(msg=green("All checks passed!")))
     
             for message in self.messages:
    -            print(message)
    -            print("")
    +            fire_event(DebugCmdResult(msg=f"{message}\n"))
     
             return not self.any_failure
     
    @@ -280,21 +284,33 @@ def test_git(self):
             return green("OK found")
     
         def test_dependencies(self):
    -        print("Required dependencies:")
    -        print(" - git [{}]".format(self.test_git()))
    -        print("")
    +        fire_event(DebugCmdOut(msg="Required dependencies:"))
    +
    +        logline_msg = self.test_git()
    +        fire_event(DebugCmdResult(msg=f" - git [{logline_msg}]\n"))
     
         def test_configuration(self):
    +        fire_event(DebugCmdOut(msg="Configuration:"))
    +
             profile_status = self._load_profile()
    +        fire_event(DebugCmdOut(msg=f"  profiles.yml file [{profile_status}]"))
    +
             project_status = self._load_project()
    -        print("Configuration:")
    -        print("  profiles.yml file [{}]".format(profile_status))
    -        print("  dbt_project.yml file [{}]".format(project_status))
    +        fire_event(DebugCmdOut(msg=f"  dbt_project.yml file [{project_status}]"))
    +
             # skip profile stuff if we can't find a profile name
             if self.profile_name is not None:
    -            print("  profile: {} [{}]".format(self.profile_name, self._profile_found()))
    -            print("  target: {} [{}]".format(self.target_name, self._target_found()))
    -        print("")
    +            fire_event(
    +                DebugCmdOut(
    +                    msg="  profile: {} [{}]\n".format(self.profile_name, self._profile_found())
    +                )
    +            )
    +            fire_event(
    +                DebugCmdOut(
    +                    msg="  target: {} [{}]\n".format(self.target_name, self._target_found())
    +                )
    +            )
    +
             self._log_project_fail()
             self._log_profile_fail()
     
    @@ -355,11 +371,12 @@ def _connection_result(self):
         def test_connection(self):
             if not self.profile:
                 return
    -        print("Connection:")
    +        fire_event(DebugCmdOut(msg="Connection:"))
             for k, v in self.profile.credentials.connection_info():
    -            print("  {}: {}".format(k, v))
    -        print("  Connection test: [{}]".format(self._connection_result()))
    -        print("")
    +            fire_event(DebugCmdOut(msg=f"  {k}: {v}"))
    +
    +        res = self._connection_result()
    +        fire_event(DebugCmdOut(msg=f"  Connection test: [{res}]\n"))
     
         @classmethod
         def validate_connection(cls, target_dict):
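The debug task above swaps bare print() calls for DebugCmdOut/DebugCmdResult events, so the same text can be routed through dbt's structured logging. A simplified stand-in (not dbt's real event machinery) showing the shape of that pattern:

    from dataclasses import dataclass

    @dataclass
    class DebugCmdOut:
        msg: str

    def fire_event(event: DebugCmdOut) -> None:
        # dbt's real fire_event routes events to the configured sinks
        # (text or JSON logs); plain print is the simplest possible stand-in.
        print(event.msg)

    fire_event(DebugCmdOut(msg="Required dependencies:"))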
    diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py
    index d4d086acbce..6db6ed20333 100644
    --- a/core/dbt/task/deps.py
    +++ b/core/dbt/task/deps.py
    @@ -19,7 +19,7 @@
         DepsInstallInfo,
         DepsListSubdirectory,
         DepsNotifyUpdatesAvailable,
    -    EmptyLine,
    +    Formatting,
     )
     from dbt.clients import system
     
    @@ -89,5 +89,5 @@ def run(self) -> None:
                         package_name=package_name, source_type=source_type, version=version
                     )
                 if packages_to_upgrade:
    -                fire_event(EmptyLine())
    +                fire_event(Formatting(""))
                     fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))
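Here, and in the printer/seed/run/runnable changes below, the dedicated EmptyLine (and SeedHeaderSeparator) event types collapse into a single generic Formatting event whose payload is the literal text to emit. A stand-in sketch of the consolidation:

    from dataclasses import dataclass

    @dataclass
    class Formatting:
        msg: str = ""

    def fire_event(event: Formatting) -> None:
        print(event.msg)  # stand-in sink

    fire_event(Formatting(""))        # replaces EmptyLine()
    fire_event(Formatting("-" * 20))  # replaces SeedHeaderSeparator(len_header=20)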
    diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py
    index 704368cf24f..95ff76083a9 100644
    --- a/core/dbt/task/freshness.py
    +++ b/core/dbt/task/freshness.py
    @@ -15,8 +15,8 @@
         SourceFreshnessResult,
         FreshnessStatus,
     )
    -from dbt.exceptions import RuntimeException, InternalException
    -from dbt.events.functions import fire_event, info
    +from dbt.exceptions import DbtRuntimeError, DbtInternalError
    +from dbt.events.functions import fire_event
     from dbt.events.types import (
         FreshnessCheckComplete,
         LogStartLine,
    @@ -33,7 +33,7 @@
     
     class FreshnessRunner(BaseRunner):
         def on_skip(self):
    -        raise RuntimeException("Freshness: nodes cannot be skipped!")
    +        raise DbtRuntimeError("Freshness: nodes cannot be skipped!")
     
         def before_execute(self):
             description = "freshness of {0.source_name}.{0.name}".format(self.node)
    @@ -56,7 +56,6 @@ def after_execute(self, result):
             level = LogFreshnessResult.status_to_level(str(result.status))
             fire_event(
                 LogFreshnessResult(
    -                info=info(level=level),
                     status=result.status,
                     source_name=source_name,
                     table_name=table_name,
    @@ -64,7 +63,8 @@ def after_execute(self, result):
                     total=self.num_nodes,
                     execution_time=result.execution_time,
                     node_info=self.node.node_info,
    -            )
    +            ),
    +            level=level,
             )
     
         def error_result(self, node, message, start_time, timing_info):
    @@ -100,15 +100,15 @@ def execute(self, compiled_node, manifest):
             # therefore loaded_at_field should be a str. If this invariant is
             # broken, raise!
             if compiled_node.loaded_at_field is None:
    -            raise InternalException(
    +            raise DbtInternalError(
                     "Got to execute for source freshness of a source that has no loaded_at_field!"
                 )
     
             relation = self.adapter.Relation.create_from_source(compiled_node)
    -        # given a Source, calculate its fresnhess.
    +        # given a Source, calculate its freshness.
             with self.adapter.connection_for(compiled_node):
                 self.adapter.clear_transaction()
    -            freshness = self.adapter.calculate_freshness(
    +            adapter_response, freshness = self.adapter.calculate_freshness(
                     relation,
                     compiled_node.loaded_at_field,
                     compiled_node.freshness.filter,
    @@ -124,7 +124,7 @@ def execute(self, compiled_node, manifest):
                 timing=[],
                 execution_time=0,
                 message=None,
    -            adapter_response={},
    +            adapter_response=adapter_response.to_dict(omit_none=True),
                 failures=None,
                 **freshness,
             )
    @@ -132,7 +132,7 @@ def execute(self, compiled_node, manifest):
         def compile(self, manifest):
             if self.node.resource_type != NodeType.Source:
                 # should be unreachable...
    -            raise RuntimeException("fresnhess runner: got a non-Source")
+            raise DbtRuntimeError("freshness runner: got a non-Source")
             # we don't do anything interesting when we compile a source node
             return self.node
     
    @@ -147,6 +147,10 @@ def node_is_match(self, node):
     
     
     class FreshnessTask(GraphRunnableTask):
    +    def defer_to_manifest(self, adapter, selected_uids):
+        # freshness tasks don't defer
    +        return
    +
         def result_path(self):
             if self.args.output:
                 return os.path.realpath(self.args.output)
    @@ -158,7 +162,7 @@ def raise_on_first_error(self):
     
         def get_node_selector(self):
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             return FreshnessSelector(
                 graph=self.graph,
                 manifest=self.manifest,
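Note the changed adapter contract above: calculate_freshness() now returns an (adapter_response, freshness) pair so the run result can carry the warehouse response instead of an empty dict. A hedged sketch of the new call-site shape, with illustrative stand-in types:

    from dataclasses import dataclass, asdict
    from typing import Any, Dict, Tuple

    @dataclass
    class AdapterResponse:
        _message: str = "SELECT 1"

        def to_dict(self, omit_none: bool = True) -> Dict[str, Any]:
            data = asdict(self)
            return {k: v for k, v in data.items() if not (omit_none and v is None)}

    def calculate_freshness(relation: str) -> Tuple[AdapterResponse, Dict[str, Any]]:
        # stand-in: a real adapter queries max(loaded_at_field) on the relation
        return AdapterResponse(), {"age": 0.0}

    adapter_response, freshness = calculate_freshness("source_table")
    result = {"adapter_response": adapter_response.to_dict(omit_none=True), **freshness}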
    diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py
    index 119a32acf42..4b2fdc49e3a 100644
    --- a/core/dbt/task/generate.py
    +++ b/core/dbt/task/generate.py
    @@ -22,7 +22,7 @@
         ColumnMetadata,
         CatalogArtifact,
     )
    -from dbt.exceptions import InternalException, AmbiguousCatalogMatch
    +from dbt.exceptions import DbtInternalError, AmbiguousCatalogMatchError
     from dbt.include.global_project import DOCS_INDEX_FILE_PATH
     from dbt.events.functions import fire_event
     from dbt.events.types import (
    @@ -81,7 +81,7 @@ def get_table(self, data: PrimitiveDict) -> CatalogTable:
                     str(data["table_name"]),
                 )
             except KeyError as exc:
    -            raise dbt.exceptions.CompilationException(
    +            raise dbt.exceptions.CompilationError(
                     "Catalog information missing required key {} (got {})".format(exc, data)
                 )
             table: CatalogTable
    @@ -119,7 +119,7 @@ def make_unique_id_map(
                 unique_ids = source_map.get(table.key(), set())
                 for unique_id in unique_ids:
                     if unique_id in sources:
    -                    raise AmbiguousCatalogMatch(
    +                    raise AmbiguousCatalogMatchError(
                             unique_id,
                             sources[unique_id].to_dict(omit_none=True),
                             table.to_dict(omit_none=True),
    @@ -225,7 +225,7 @@ def run(self) -> CatalogArtifact:
                     shutil.copytree(asset_path, to_asset_path)
     
             if self.manifest is None:
    -            raise InternalException("self.manifest was None in run!")
    +            raise DbtInternalError("self.manifest was None in run!")
     
             adapter = get_adapter(self.config)
             with adapter.connection_named("generate_catalog"):
    diff --git a/core/dbt/task/init.py b/core/dbt/task/init.py
    index ebb097a3311..16d6b332348 100644
    --- a/core/dbt/task/init.py
    +++ b/core/dbt/task/init.py
    @@ -252,7 +252,7 @@ def run(self):
             try:
                 move_to_nearest_project_dir(self.args.project_dir)
                 in_project = True
    -        except dbt.exceptions.RuntimeException:
    +        except dbt.exceptions.DbtRuntimeError:
                 in_project = False
     
             if in_project:
    diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py
    index a0b549f620f..0ca9b89b0cc 100644
    --- a/core/dbt/task/list.py
    +++ b/core/dbt/task/list.py
    @@ -1,15 +1,21 @@
     import json
     
    +import dbt.flags
    +
     from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric
     from dbt.graph import ResourceTypeSelector
     from dbt.task.runnable import GraphRunnableTask
     from dbt.task.test import TestSelector
     from dbt.node_types import NodeType
    -from dbt.events.functions import warn_or_error
    -from dbt.events.types import NoNodesSelected
    -from dbt.exceptions import RuntimeException, InternalException
    -from dbt.logger import log_manager
    -from dbt.events.eventmgr import EventLevel
    +from dbt.events.functions import (
    +    fire_event,
    +    warn_or_error,
    +)
    +from dbt.events.types import (
    +    NoNodesSelected,
    +    ListCmdOut,
    +)
    +from dbt.exceptions import DbtRuntimeError, DbtInternalError
     
     
     class ListTask(GraphRunnableTask):
    @@ -44,26 +50,12 @@ def __init__(self, args, config, manifest):
             super().__init__(args, config, manifest)
             if self.args.models:
                 if self.args.select:
    -                raise RuntimeException('"models" and "select" are mutually exclusive arguments')
    +                raise DbtRuntimeError('"models" and "select" are mutually exclusive arguments')
                 if self.args.resource_types:
    -                raise RuntimeException(
    +                raise DbtRuntimeError(
                         '"models" and "resource_type" are mutually exclusive ' "arguments"
                     )
     
    -    @classmethod
    -    def pre_init_hook(cls, args):
    -        """A hook called before the task is initialized."""
    -        # Filter out all INFO-level logging to allow piping ls output to jq, etc
    -        # WARN level will still include all warnings + errors
    -        # Do this by:
    -        #  - returning the log level so that we can pass it into the 'level_override'
    -        #    arg of events.functions.setup_event_logger() -- good!
    -        #  - mutating the initialized, not-yet-configured STDOUT event logger
    -        #    because it's being configured too late -- bad! TODO refactor!
    -        log_manager.stderr_console()
    -        super().pre_init_hook(args)
    -        return EventLevel.WARN
    -
         def _iterate_selected_nodes(self):
             selector = self.get_node_selector()
             spec = self.get_selection_spec()
    @@ -72,7 +64,7 @@ def _iterate_selected_nodes(self):
                 warn_or_error(NoNodesSelected())
                 return
             if self.manifest is None:
    -            raise InternalException("manifest is None in _iterate_selected_nodes")
    +            raise DbtInternalError("manifest is None in _iterate_selected_nodes")
             for node in nodes:
                 if node in self.manifest.nodes:
                     yield self.manifest.nodes[node]
    @@ -83,7 +75,7 @@ def _iterate_selected_nodes(self):
                 elif node in self.manifest.metrics:
                     yield self.manifest.metrics[node]
                 else:
    -                raise RuntimeException(
    +                raise DbtRuntimeError(
                         f'Got an unexpected result from node selection: "{node}"'
                         f"Expected a source or a node!"
                     )
    @@ -143,14 +135,19 @@ def run(self):
             elif output == "path":
                 generator = self.generate_paths
             else:
    -            raise InternalException("Invalid output {}".format(output))
    +            raise DbtInternalError("Invalid output {}".format(output))
     
             return self.output_results(generator())
     
         def output_results(self, results):
    +        """Log, or output a plain, newline-delimited, and ready-to-pipe list of nodes found."""
             for result in results:
                 self.node_results.append(result)
    -            print(result)
    +            if dbt.flags.LOG_FORMAT == "json":
    +                fire_event(ListCmdOut(msg=result))
    +            else:
+                # Cleaner to keep this as print() than to reconfigure the logger to suppress timestamps.
    +                print(result)
             return self.node_results
     
         @property
    @@ -179,9 +176,13 @@ def selection_arg(self):
             else:
                 return self.args.select
     
    +    def defer_to_manifest(self, adapter, selected_uids):
+        # list tasks don't defer
    +        return
    +
         def get_node_selector(self):
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             if self.resource_types == [NodeType.Test]:
                 return TestSelector(
                     graph=self.graph,
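The output_results() change above is what keeps `dbt ls` pipeable: in JSON log mode each node is emitted as a structured event, otherwise it stays a bare print. A simplified stand-in for that branch (LOG_FORMAT and the JSON wrapper are placeholders for dbt's flag and ListCmdOut event type):

    import json

    LOG_FORMAT = "json"  # placeholder for dbt.flags.LOG_FORMAT

    def output_result(result: str) -> None:
        if LOG_FORMAT == "json":
            # stand-in for fire_event(ListCmdOut(msg=result))
            print(json.dumps({"msg": result}))
        else:
            print(result)

    output_result("model.my_project.my_model")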
    diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py
    index edb2592d194..9fae854bdb4 100644
    --- a/core/dbt/task/printer.py
    +++ b/core/dbt/task/printer.py
    @@ -5,7 +5,7 @@
     )
     from dbt.events.functions import fire_event
     from dbt.events.types import (
    -    EmptyLine,
    +    Formatting,
         RunResultWarning,
         RunResultWarningMessage,
         RunResultFailure,
    @@ -72,14 +72,14 @@ def print_run_status_line(results) -> None:
             stats["total"] += 1
     
         with TextOnly():
    -        fire_event(EmptyLine())
    +        fire_event(Formatting(""))
         fire_event(StatsLine(stats=stats))
     
     
     def print_run_result_error(result, newline: bool = True, is_warning: bool = False) -> None:
         if newline:
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
     
         if result.status == NodeStatus.Fail or (is_warning and result.status == NodeStatus.Warn):
             if is_warning:
    @@ -109,12 +109,12 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals
     
             if result.node.build_path is not None:
                 with TextOnly():
    -                fire_event(EmptyLine())
    +                fire_event(Formatting(""))
                 fire_event(SQLCompiledPath(path=result.node.compiled_path))
     
             if result.node.should_store_failures:
                 with TextOnly():
    -                fire_event(EmptyLine())
    +                fire_event(Formatting(""))
                 fire_event(CheckNodeTestFailure(relation_name=result.node.relation_name))
     
         elif result.message is not None:
    @@ -143,7 +143,7 @@ def print_run_end_messages(results, keyboard_interrupt: bool = False) -> None:
     
         with DbtStatusMessage(), InvocationProcessor():
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
             fire_event(
                 EndOfRunSummary(
                     num_errors=len(errors),
    diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py
    index f21dfd570e9..2c251c2aff6 100644
    --- a/core/dbt/task/run.py
    +++ b/core/dbt/task/run.py
    @@ -21,23 +21,24 @@
     from dbt.contracts.graph.nodes import HookNode, ResultNode
     from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult
     from dbt.exceptions import (
    -    CompilationException,
    -    InternalException,
    -    MissingMaterialization,
    -    RuntimeException,
    -    ValidationException,
    +    CompilationError,
    +    DbtInternalError,
    +    MissingMaterializationError,
    +    DbtRuntimeError,
    +    DbtValidationError,
     )
    -from dbt.events.functions import fire_event, get_invocation_id, info
    +from dbt.events.functions import fire_event, get_invocation_id
     from dbt.events.types import (
         DatabaseErrorRunningHook,
    -    EmptyLine,
    +    Formatting,
         HooksRunning,
    -    HookFinished,
    +    FinishedRunningStats,
         LogModelResult,
         LogStartLine,
         LogHookEndLine,
         LogHookStartLine,
     )
    +from dbt.events.base_types import EventLevel
     from dbt.logger import (
         TextOnly,
         HookMetadata,
    @@ -105,7 +106,7 @@ def get_hook(source, index):
     
     def track_model_run(index, num_nodes, run_model_result):
         if tracking.active_user is None:
    -        raise InternalException("cannot track model run with no active user")
    +        raise DbtInternalError("cannot track model run with no active user")
         invocation_id = get_invocation_id()
         tracking.track_model_run(
             {
    @@ -134,14 +135,14 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
                 'Invalid return value from materialization, "relations" '
                 "not found, got keys: {}".format(list(inp))
             )
    -        raise CompilationException(msg, node=model) from None
    +        raise CompilationError(msg, node=model) from None
     
         if not isinstance(relations_value, list):
             msg = (
                 'Invalid return value from materialization, "relations" '
                 "not a list, got: {}".format(relations_value)
             )
    -        raise CompilationException(msg, node=model) from None
    +        raise CompilationError(msg, node=model) from None
     
         relations: List[BaseRelation] = []
         for relation in relations_value:
    @@ -150,7 +151,7 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
                     "Invalid return value from materialization, "
                     '"relations" contains non-Relation: {}'.format(relation)
                 )
    -            raise CompilationException(msg, node=model)
    +            raise CompilationError(msg, node=model)
     
             assert isinstance(relation, BaseRelation)
             relations.append(relation)
    @@ -186,10 +187,10 @@ def print_result_line(self, result):
             description = self.describe_node()
             if result.status == NodeStatus.Error:
                 status = result.status
    -            level = "error"
    +            level = EventLevel.ERROR
             else:
                 status = result.message
    -            level = "info"
    +            level = EventLevel.INFO
             fire_event(
                 LogModelResult(
                     description=description,
    @@ -198,8 +199,8 @@ def print_result_line(self, result):
                     total=self.num_nodes,
                     execution_time=result.execution_time,
                     node_info=self.node.node_info,
    -                info=info(level=level),
    -            )
    +            ),
    +            level=level,
             )
     
         def before_execute(self):
    @@ -212,7 +213,7 @@ def after_execute(self, result):
         def _build_run_model_result(self, model, context):
             result = context["load_result"]("main")
             if not result:
    -            raise RuntimeException("main is not being called during running model")
    +            raise DbtRuntimeError("main is not being called during running model")
             adapter_response = {}
             if isinstance(result.response, dbtClassMixin):
                 adapter_response = result.response.to_dict(omit_none=True)
    @@ -233,7 +234,7 @@ def _materialization_relations(self, result: Any, model) -> List[BaseRelation]:
                     'The materialization ("{}") did not explicitly return a '
                     "list of relations to add to the cache.".format(str(model.get_materialization()))
                 )
    -            raise CompilationException(msg, node=model)
    +            raise CompilationError(msg, node=model)
     
             if isinstance(result, dict):
                 return _validate_materialization_relations_dict(result, model)
    @@ -242,7 +243,7 @@ def _materialization_relations(self, result: Any, model) -> List[BaseRelation]:
                 "Invalid return value from materialization, expected a dict "
                 'with key "relations", got: {}'.format(str(result))
             )
    -        raise CompilationException(msg, node=model)
    +        raise CompilationError(msg, node=model)
     
         def execute(self, model, manifest):
             context = generate_runtime_model_context(model, self.config, manifest)
    @@ -252,10 +253,12 @@ def execute(self, model, manifest):
             )
     
             if materialization_macro is None:
    -            raise MissingMaterialization(model=model, adapter_type=self.adapter.type())
    +            raise MissingMaterializationError(
    +                materialization=model.get_materialization(), adapter_type=self.adapter.type()
    +            )
     
             if "config" not in context:
    -            raise InternalException(
    +            raise DbtInternalError(
                     "Invalid materialization context generated, missing config: {}".format(context)
                 )
             context_config = context["config"]
    @@ -264,7 +267,7 @@ def execute(self, model, manifest):
             model_lang_supported = model.language in materialization_macro.supported_languages
             if mat_has_supported_langs and not model_lang_supported:
                 str_langs = [str(lang) for lang in materialization_macro.supported_languages]
    -            raise ValidationException(
    +            raise DbtValidationError(
                     f'Materialization "{materialization_macro.name}" only supports languages {str_langs}; '
                     f'got "{model.language}"'
                 )
    @@ -312,7 +315,7 @@ def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]:
         def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]:
     
             if self.manifest is None:
    -            raise InternalException("self.manifest was None in get_hooks_by_type")
    +            raise DbtInternalError("self.manifest was None in get_hooks_by_type")
     
             nodes = self.manifest.nodes.values()
             # find all hooks defined in the manifest (could be multiple projects)
    @@ -332,7 +335,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context):
             num_hooks = len(ordered_hooks)
     
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
             fire_event(HooksRunning(num_hooks=num_hooks, hook_type=hook_type))
     
             startctx = TimestampNamed("node_started_at")
    @@ -385,14 +388,14 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context):
             self._total_executed += len(ordered_hooks)
     
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
     
         def safe_run_hooks(
             self, adapter, hook_type: RunHookType, extra_context: Dict[str, Any]
         ) -> None:
             try:
                 self.run_hooks(adapter, hook_type, extra_context)
    -        except RuntimeException as exc:
    +        except DbtRuntimeError as exc:
                 fire_event(DatabaseErrorRunningHook(hook_type=hook_type.value))
                 self.node_results.append(
                     BaseResult(
    @@ -416,9 +419,11 @@ def print_results_line(self, results, execution_time):
                 execution = utils.humanize_execution_time(execution_time=execution_time)
     
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
             fire_event(
    -            HookFinished(stat_line=stat_line, execution=execution, execution_time=execution_time)
    +            FinishedRunningStats(
    +                stat_line=stat_line, execution=execution, execution_time=execution_time
    +            )
             )
     
         def before_run(self, adapter, selected_uids: AbstractSet[str]):
    @@ -454,7 +459,7 @@ def after_run(self, adapter, results):
     
         def get_node_selector(self) -> ResourceTypeSelector:
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             return ResourceTypeSelector(
                 graph=self.graph,
                 manifest=self.manifest,
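The run.py hunks above also show the new logging contract: severity moves out of the event payload (info=info(level=...)) and into a level argument on fire_event itself. A minimal stand-in sketch of that signature change:

    from enum import Enum

    class EventLevel(Enum):
        INFO = "info"
        ERROR = "error"

    def fire_event(event: str, level: EventLevel = EventLevel.INFO) -> None:
        # stand-in sink; the real fire_event dispatches to dbt's event manager
        print(f"[{level.value}] {event}")

    status = "error"
    level = EventLevel.ERROR if status == "error" else EventLevel.INFO
    fire_event("1 of 3 ERROR creating my_model", level=level)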
    diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py
    index e86cdbd2973..16bfc304164 100644
    --- a/core/dbt/task/runnable.py
    +++ b/core/dbt/task/runnable.py
    @@ -26,7 +26,7 @@
     )
     from dbt.events.functions import fire_event, warn_or_error
     from dbt.events.types import (
    -    EmptyLine,
    +    Formatting,
         LogCancelLine,
         DefaultSelector,
         NodeStart,
    @@ -41,10 +41,10 @@
     from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus
     from dbt.contracts.state import PreviousState
     from dbt.exceptions import (
    -    InternalException,
    -    NotImplementedException,
    -    RuntimeException,
    -    FailFastException,
    +    DbtInternalError,
    +    NotImplementedError,
    +    DbtRuntimeError,
    +    FailFastError,
     )
     
     from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference
    @@ -116,7 +116,11 @@ def get_selection_spec(self) -> SelectionSpec:
     
         @abstractmethod
         def get_node_selector(self) -> NodeSelector:
    -        raise NotImplementedException(f"get_node_selector not implemented for task {type(self)}")
    +        raise NotImplementedError(f"get_node_selector not implemented for task {type(self)}")
    +
    +    @abstractmethod
    +    def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
    +        raise NotImplementedError(f"defer_to_manifest not implemented for task {type(self)}")
     
         def get_graph_queue(self) -> GraphQueue:
             selector = self.get_node_selector()
    @@ -126,7 +130,7 @@ def get_graph_queue(self) -> GraphQueue:
         def _runtime_initialize(self):
             self.compile_manifest()
             if self.manifest is None or self.graph is None:
    -            raise InternalException("_runtime_initialize never loaded the graph!")
    +            raise DbtInternalError("_runtime_initialize never loaded the graph!")
     
             self.job_queue = self.get_graph_queue()
     
    @@ -138,7 +142,7 @@ def _runtime_initialize(self):
                 elif uid in self.manifest.sources:
                     self._flattened_nodes.append(self.manifest.sources[uid])
                 else:
    -                raise InternalException(
    +                raise DbtInternalError(
                         f"Node selection returned {uid}, expected a node or a source"
                     )
     
    @@ -148,7 +152,7 @@ def raise_on_first_error(self):
             return False
     
         def get_runner_type(self, node):
    -        raise NotImplementedException("Not Implemented")
    +        raise NotImplementedError("Not Implemented")
     
         def result_path(self):
             return os.path.join(self.config.target_path, RESULT_FILE_NAME)
    @@ -188,10 +192,6 @@ def call_runner(self, runner):
                 status: Dict[str, str] = {}
                 try:
                     result = runner.run_with_hooks(self.manifest)
    -                status = runner.get_result_status(result)
    -                runner.node.update_event_status(
    -                    node_status=result.status, finished_at=datetime.utcnow().isoformat()
    -                )
                 finally:
                     finishctx = TimestampNamed("finished_at")
                     with finishctx, DbtModelState(status):
    @@ -208,7 +208,7 @@ def call_runner(self, runner):
             fail_fast = flags.FAIL_FAST
     
             if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast:
    -            self._raise_next_tick = FailFastException(
    +            self._raise_next_tick = FailFastError(
                     msg="Failing early due to test failure or runtime error",
                     result=result,
                     node=getattr(result, "node", None),
    @@ -217,7 +217,7 @@ def call_runner(self, runner):
                 # if we raise inside a thread, it'll just get silently swallowed.
                 # stash the error message we want here, and it will check the
                 # next 'tick' - should be soon since our thread is about to finish!
    -            self._raise_next_tick = RuntimeException(result.message)
    +            self._raise_next_tick = DbtRuntimeError(result.message)
     
             return result
     
    @@ -242,7 +242,7 @@ def _raise_set_error(self):
         def run_queue(self, pool):
             """Given a pool, submit jobs from the queue to the pool."""
             if self.job_queue is None:
    -            raise InternalException("Got to run_queue with no job queue set")
    +            raise DbtInternalError("Got to run_queue with no job queue set")
     
             def callback(result):
                 """Note: mark_done, at a minimum, must happen here or dbt will
    @@ -251,7 +251,7 @@ def callback(result):
                 self._handle_result(result)
     
                 if self.job_queue is None:
    -                raise InternalException("Got to run_queue callback with no job queue set")
    +                raise DbtInternalError("Got to run_queue callback with no job queue set")
                 self.job_queue.mark_done(result.node.unique_id)
     
             while not self.job_queue.empty():
    @@ -293,7 +293,7 @@ def _handle_result(self, result):
             node = result.node
     
             if self.manifest is None:
    -            raise InternalException("manifest was None in _handle_result")
    +            raise DbtInternalError("manifest was None in _handle_result")
     
             if isinstance(node, SourceDefinition):
                 self.manifest.update_source(node)
    @@ -343,13 +343,13 @@ def execute_nodes(self):
                     )
                 )
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
     
             pool = ThreadPool(num_threads)
             try:
                 self.run_queue(pool)
     
    -        except FailFastException as failure:
    +        except FailFastError as failure:
                 self._cancel_connections(pool)
                 print_run_result_error(failure.result)
                 raise
    @@ -366,7 +366,7 @@ def execute_nodes(self):
     
         def _mark_dependent_errors(self, node_id, result, cause):
             if self.graph is None:
    -            raise InternalException("graph is None in _mark_dependent_errors")
    +            raise DbtInternalError("graph is None in _mark_dependent_errors")
             for dep_node_id in self.graph.get_dependent_nodes(node_id):
                 self._skipped_children[dep_node_id] = cause
     
    @@ -385,6 +385,7 @@ def populate_adapter_cache(self, adapter, required_schemas: Set[BaseRelation] =
         def before_run(self, adapter, selected_uids: AbstractSet[str]):
             with adapter.connection_named("master"):
                 self.populate_adapter_cache(adapter)
    +            self.defer_to_manifest(adapter, selected_uids)
     
         def after_run(self, adapter, results):
             pass
    @@ -419,11 +420,11 @@ def run(self):
             self._runtime_initialize()
     
             if self._flattened_nodes is None:
    -            raise InternalException("after _runtime_initialize, _flattened_nodes was still None")
    +            raise DbtInternalError("after _runtime_initialize, _flattened_nodes was still None")
     
             if len(self._flattened_nodes) == 0:
                 with TextOnly():
    -                fire_event(EmptyLine())
    +                fire_event(Formatting(""))
                 warn_or_error(NothingToDo())
                 result = self.get_result(
                     results=[],
    @@ -432,7 +433,7 @@ def run(self):
                 )
             else:
                 with TextOnly():
    -                fire_event(EmptyLine())
    +                fire_event(Formatting(""))
                 selected_uids = frozenset(n.unique_id for n in self._flattened_nodes)
                 result = self.execute_with_hooks(selected_uids)
     
    @@ -475,7 +476,7 @@ def interpret_results(cls, results):
     
         def get_model_schemas(self, adapter, selected_uids: Iterable[str]) -> Set[BaseRelation]:
             if self.manifest is None:
    -            raise InternalException("manifest was None in get_model_schemas")
    +            raise DbtInternalError("manifest was None in get_model_schemas")
             result: Set[BaseRelation] = set()
     
             for node in self.manifest.nodes.values():
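The runnable.py changes promote defer_to_manifest to an abstract hook that before_run() always calls, so tasks with no notion of deferral override it with a no-op. A self-contained sketch of that template-method shape (stand-in classes; builtin NotImplementedError rather than dbt's renamed exception):

    from abc import ABC, abstractmethod

    class GraphRunnableTask(ABC):
        def before_run(self) -> None:
            self.populate_adapter_cache()
            self.defer_to_manifest()  # now part of every task's pre-run sequence

        def populate_adapter_cache(self) -> None:
            pass

        @abstractmethod
        def defer_to_manifest(self) -> None:
            raise NotImplementedError(f"defer_to_manifest not implemented for {type(self)}")

    class FreshnessTask(GraphRunnableTask):
        def defer_to_manifest(self) -> None:
            # freshness tasks don't defer
            return

    FreshnessTask().before_run()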
    diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py
    index 5c922a5ba90..9ec1df3b81f 100644
    --- a/core/dbt/task/seed.py
    +++ b/core/dbt/task/seed.py
    @@ -6,17 +6,17 @@
     )
     
     from dbt.contracts.results import RunStatus
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     from dbt.graph import ResourceTypeSelector
     from dbt.logger import TextOnly
    -from dbt.events.functions import fire_event, info
    +from dbt.events.functions import fire_event
     from dbt.events.types import (
         SeedHeader,
    -    SeedHeaderSeparator,
    -    EmptyLine,
    +    Formatting,
         LogSeedResult,
         LogStartLine,
     )
    +from dbt.events.base_types import EventLevel
     from dbt.node_types import NodeType
     from dbt.contracts.results import NodeStatus
     
    @@ -46,10 +46,9 @@ def compile(self, manifest):
     
         def print_result_line(self, result):
             model = result.node
    -        level = "error" if result.status == NodeStatus.Error else "info"
    +        level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO
             fire_event(
                 LogSeedResult(
    -                info=info(level=level),
                     status=result.status,
                     result_message=result.message,
                     index=self.node_index,
    @@ -58,7 +57,8 @@ def print_result_line(self, result):
                     schema=self.node.schema,
                     relation=model.alias,
                     node_info=model.node_info,
    -            )
    +            ),
    +            level=level,
             )
     
     
    @@ -72,7 +72,7 @@ def raise_on_first_error(self):
     
         def get_node_selector(self):
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             return ResourceTypeSelector(
                 graph=self.graph,
                 manifest=self.manifest,
    @@ -98,13 +98,13 @@ def show_table(self, result):
     
             header = "Random sample of table: {}.{}".format(schema, alias)
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
             fire_event(SeedHeader(header=header))
    -        fire_event(SeedHeaderSeparator(len_header=len(header)))
    +        fire_event(Formatting("-" * len(header)))
     
             rand_table.print_table(max_rows=10, max_columns=None)
             with TextOnly():
    -            fire_event(EmptyLine())
    +            fire_event(Formatting(""))
     
         def show_tables(self, results):
             for result in results:
    diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py
    index 44ccbd88361..f5e8a549bb2 100644
    --- a/core/dbt/task/snapshot.py
    +++ b/core/dbt/task/snapshot.py
    @@ -1,7 +1,8 @@
     from .run import ModelRunner, RunTask
     
    -from dbt.exceptions import InternalException
    -from dbt.events.functions import fire_event, info
    +from dbt.exceptions import DbtInternalError
    +from dbt.events.functions import fire_event
    +from dbt.events.base_types import EventLevel
     from dbt.events.types import LogSnapshotResult
     from dbt.graph import ResourceTypeSelector
     from dbt.node_types import NodeType
    @@ -15,10 +16,9 @@ def describe_node(self):
         def print_result_line(self, result):
             model = result.node
             cfg = model.config.to_dict(omit_none=True)
    -        level = "error" if result.status == NodeStatus.Error else "info"
    +        level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO
             fire_event(
                 LogSnapshotResult(
    -                info=info(level=level),
                     status=result.status,
                     description=self.get_node_representation(),
                     cfg=cfg,
    @@ -26,7 +26,8 @@ def print_result_line(self, result):
                     total=self.num_nodes,
                     execution_time=result.execution_time,
                     node_info=model.node_info,
    -            )
    +            ),
    +            level=level,
             )
     
     
    @@ -36,7 +37,7 @@ def raise_on_first_error(self):
     
         def get_node_selector(self):
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             return ResourceTypeSelector(
                 graph=self.graph,
                 manifest=self.manifest,
    diff --git a/core/dbt/task/sql.py b/core/dbt/task/sql.py
    index 4a267bd91bf..4f662383d74 100644
    --- a/core/dbt/task/sql.py
    +++ b/core/dbt/task/sql.py
    @@ -25,7 +25,7 @@ def __init__(self, config, adapter, node, node_index, num_nodes):
         def handle_exception(self, e, ctx):
             fire_event(SQLRunnerException(exc=str(e), exc_info=traceback.format_exc()))
             if isinstance(e, dbt.exceptions.Exception):
    -            if isinstance(e, dbt.exceptions.RuntimeException):
    +            if isinstance(e, dbt.exceptions.DbtRuntimeError):
                     e.add_node(ctx.node)
                 return e
     
    @@ -51,7 +51,7 @@ def error_result(self, node, error, start_time, timing_info):
             raise error
     
         def ephemeral_result(self, node, start_time, timing_info):
    -        raise dbt.exceptions.NotImplementedException("cannot execute ephemeral nodes remotely!")
    +        raise dbt.exceptions.NotImplementedError("cannot execute ephemeral nodes remotely!")
     
     
     class SqlCompileRunner(GenericSqlRunner[RemoteCompileResult]):
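The exception renames threaded through this patch follow one convention: *Exception becomes *Error, gaining a Dbt prefix where the bare name would be ambiguous (DbtRuntimeError, DbtInternalError, DbtDatabaseError). A minimal sketch of catching the renamed runtime error, with a hypothetical stand-in for node-scoped work:

    import dbt.exceptions

    def do_node_work():
        # Hypothetical stand-in for any node-scoped operation.
        raise dbt.exceptions.DbtRuntimeError("simulated failure")

    try:
        do_node_work()
    except dbt.exceptions.DbtRuntimeError as e:  # was dbt.exceptions.RuntimeException
        print(f"node-scoped failure: {e}")
    # handle_exception above additionally calls e.add_node(ctx.node) to attach node context.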
    diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py
    index 26d6d46f028..3ba1b0f85f2 100644
    --- a/core/dbt/task/test.py
    +++ b/core/dbt/task/test.py
    @@ -5,6 +5,7 @@
     from dbt.events.format import pluralize
     from dbt.dataclass_schema import dbtClassMixin
     import threading
    +from typing import Dict, Any
     
     from .compile import CompileRunner
     from .run import RunTask
    @@ -16,15 +17,15 @@
     from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult
     from dbt.context.providers import generate_runtime_model_context
     from dbt.clients.jinja import MacroGenerator
    -from dbt.events.functions import fire_event, info
    +from dbt.events.functions import fire_event
     from dbt.events.types import (
         LogTestResult,
         LogStartLine,
     )
     from dbt.exceptions import (
    -    InternalException,
    -    InvalidBoolean,
    -    MissingMaterialization,
    +    DbtInternalError,
    +    BooleanError,
    +    MissingMaterializationError,
     )
     from dbt.graph import (
         ResourceTypeSelector,
    @@ -38,6 +39,7 @@ class TestResultData(dbtClassMixin):
         failures: int
         should_warn: bool
         should_error: bool
    +    adapter_response: Dict[str, Any]
     
         @classmethod
         def validate(cls, data):
    @@ -51,7 +53,7 @@ def convert_bool_type(field) -> bool:
                 try:
                     return bool(strtobool(field))  # type: ignore
                 except ValueError:
    -                raise InvalidBoolean(field, "get_test_sql")
    +                raise BooleanError(field, "get_test_sql")
     
             # need this so we catch both true bools and 0/1
             return bool(field)
    @@ -68,14 +70,14 @@ def print_result_line(self, result):
             fire_event(
                 LogTestResult(
                     name=model.name,
    -                info=info(level=LogTestResult.status_to_level(str(result.status))),
                     status=str(result.status),
                     index=self.node_index,
                     num_models=self.num_nodes,
                     execution_time=result.execution_time,
                     node_info=model.node_info,
                     num_failures=result.failures,
    -            )
    +            ),
    +            level=LogTestResult.status_to_level(str(result.status)),
             )
     
         def print_start_line(self):
    @@ -91,9 +93,7 @@ def print_start_line(self):
         def before_execute(self):
             self.print_start_line()
     
    -    def execute_test(
    -        self, test: TestNode, manifest: Manifest
    -    ) -> TestResultData:
    +    def execute_test(self, test: TestNode, manifest: Manifest) -> TestResultData:
             context = generate_runtime_model_context(test, self.config, manifest)
     
             materialization_macro = manifest.find_materialization_macro_by_name(
    @@ -101,10 +101,12 @@ def execute_test(
             )
     
             if materialization_macro is None:
    -            raise MissingMaterialization(model=test, adapter_type=self.adapter.type())
    +            raise MissingMaterializationError(
    +                materialization=test.get_materialization(), adapter_type=self.adapter.type()
    +            )
     
             if "config" not in context:
    -            raise InternalException(
    +            raise DbtInternalError(
                     "Invalid materialization context generated, missing config: {}".format(context)
                 )
     
    @@ -118,14 +120,14 @@ def execute_test(
             table = result["table"]
             num_rows = len(table.rows)
             if num_rows != 1:
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"dbt internally failed to execute {test.unique_id}: "
                     f"Returned {num_rows} rows, but expected "
                     f"1 row"
                 )
             num_cols = len(table.columns)
             if num_cols != 3:
    -            raise InternalException(
    +            raise DbtInternalError(
                     f"dbt internally failed to execute {test.unique_id}: "
                     f"Returned {num_cols} columns, but expected "
                     f"3 columns"
    @@ -137,6 +139,7 @@ def execute_test(
                     map(_coerce_decimal, table.rows[0]),
                 )
             )
    +        test_result_dct["adapter_response"] = result["response"].to_dict(omit_none=True)
             TestResultData.validate(test_result_dct)
             return TestResultData.from_dict(test_result_dct)
     
    @@ -171,7 +174,7 @@ def execute(self, test: TestNode, manifest: Manifest):
                 thread_id=thread_id,
                 execution_time=0,
                 message=message,
    -            adapter_response={},
    +            adapter_response=result.adapter_response,
                 failures=failures,
             )
     
    @@ -203,7 +206,7 @@ def raise_on_first_error(self):
     
         def get_node_selector(self) -> TestSelector:
             if self.manifest is None or self.graph is None:
    -            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
             return TestSelector(
                 graph=self.graph,
                 manifest=self.manifest,
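With this change the adapter's query response is carried through TestResultData into the test's RunResult instead of the previous hardcoded {}. The payload is whatever result["response"].to_dict(omit_none=True) yields; a representative (illustrative, adapter-dependent) shape:

    # Illustrative serialized AdapterResponse for a test query; exact keys vary by adapter.
    adapter_response = {
        "_message": "SELECT 1",
        "code": "SELECT",
        "rows_affected": 1,
    }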
    diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py
    index a8c640ef116..2e7cd435118 100644
    --- a/core/dbt/tests/fixtures/project.py
    +++ b/core/dbt/tests/fixtures/project.py
    @@ -6,7 +6,7 @@
     import warnings
     import yaml
     
    -from dbt.exceptions import CompilationException, DatabaseException
    +from dbt.exceptions import CompilationError, DbtDatabaseError
     import dbt.flags as flags
     from dbt.config.runtime import RuntimeConfig
     from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type
    @@ -249,10 +249,16 @@ def clean_up_logging():
     # otherwise this will fail. So to test errors in those areas, you need to copy the files
     # into the project in the tests instead of putting them in the fixtures.
     @pytest.fixture(scope="class")
    -def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging):
    +def adapter(
    +    unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging
    +):
         # The profiles.yml and dbt_project.yml should already be written out
         args = Namespace(
    -        profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None, threads=None
    +        profiles_dir=str(profiles_root),
    +        project_dir=str(project_root),
    +        target=None,
    +        profile=None,
    +        threads=None,
         )
         flags.set_from_args(args, {})
         runtime_config = RuntimeConfig.from_args(args)
    @@ -494,10 +500,10 @@ def project(
         # a `load_dependencies` method.
         # Macros gets executed as part of drop_scheme in core/dbt/adapters/sql/impl.py.  When
         # the macros have errors (which is what we're actually testing for...) they end up
    -    # throwing CompilationExceptions or DatabaseExceptions
+    # throwing CompilationErrors or DbtDatabaseErrors
         try:
             project.drop_test_schema()
    -    except (KeyError, AttributeError, CompilationException, DatabaseException):
    +    except (KeyError, AttributeError, CompilationError, DbtDatabaseError):
             pass
         os.chdir(orig_cwd)
         cleanup_event_logger()
    diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py
    index 824e6f88630..147f3b758e0 100644
    --- a/core/dbt/tests/util.py
    +++ b/core/dbt/tests/util.py
    @@ -12,7 +12,12 @@
     from dbt.cli.main import dbtRunner
     from dbt.logger import log_manager
     from dbt.contracts.graph.manifest import Manifest
    -from dbt.events.functions import fire_event, capture_stdout_logs, stop_capture_stdout_logs, reset_metadata_vars
    +from dbt.events.functions import (
    +    fire_event,
    +    capture_stdout_logs,
    +    stop_capture_stdout_logs,
    +    reset_metadata_vars,
    +)
     from dbt.events.test_types import IntegrationTestDebug
     
     # =============================================================================
    diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py
    index 1ca1f4d98ca..bb1b615ead7 100644
    --- a/core/dbt/tracking.py
    +++ b/core/dbt/tracking.py
    @@ -24,7 +24,7 @@
         SendingEvent,
         TrackingInitializeFailure,
     )
    -from dbt.exceptions import FailedToConnectException, NotImplementedException
    +from dbt.exceptions import FailedToConnectError, NotImplementedError
     
     sp_logger.setLevel(100)
     
    @@ -451,7 +451,7 @@ def track_run(run_command=None):
         try:
             yield
             track_invocation_end(invocation_context, result_type="ok")
    -    except (NotImplementedException, FailedToConnectException) as e:
    +    except (NotImplementedError, FailedToConnectError) as e:
             fire_event(MainEncounteredError(exc=str(e)))
             track_invocation_end(invocation_context, result_type="error")
         except Exception:
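track_run is used as a context manager around an entire command invocation: the with-block body is the command itself, and the except arms above pick the result_type reported at invocation end. A minimal usage sketch (the command function is a hypothetical placeholder):

    from dbt.tracking import track_run

    def run_my_command():
        # Hypothetical stand-in for the actual command body.
        pass

    with track_run(run_command="run"):
        run_my_command()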
    diff --git a/core/dbt/utils.py b/core/dbt/utils.py
    index 370480ac11c..27309c4b373 100644
    --- a/core/dbt/utils.py
    +++ b/core/dbt/utils.py
    @@ -15,7 +15,7 @@
     from pathlib import PosixPath, WindowsPath
     
     from contextlib import contextmanager
    -from dbt.exceptions import ConnectionException, DuplicateAlias
    +from dbt.exceptions import ConnectionError, DuplicateAliasError
     from dbt.events.functions import fire_event
     from dbt.events.types import RetryExternalCall, RecordRetryException
     from dbt import flags
    @@ -92,13 +92,13 @@ def get_model_name_or_none(model):
     
     def get_dbt_macro_name(name):
         if name is None:
    -        raise dbt.exceptions.InternalException("Got None for a macro name!")
    +        raise dbt.exceptions.DbtInternalError("Got None for a macro name!")
         return f"{MACRO_PREFIX}{name}"
     
     
     def get_dbt_docs_name(name):
         if name is None:
    -        raise dbt.exceptions.InternalException("Got None for a doc name!")
    +        raise dbt.exceptions.DbtInternalError("Got None for a doc name!")
         return f"{DOCS_PREFIX}{name}"
     
     
    @@ -228,7 +228,7 @@ def deep_map_render(func: Callable[[Any, Tuple[Union[str, int], ...]], Any], val
             return _deep_map_render(func, value, ())
         except RuntimeError as exc:
             if "maximum recursion depth exceeded" in str(exc):
    -            raise dbt.exceptions.RecursionException("Cycle detected in deep_map_render")
    +            raise dbt.exceptions.RecursionError("Cycle detected in deep_map_render")
             raise
     
     
    @@ -365,7 +365,7 @@ def translate_mapping(self, kwargs: Mapping[str, Any]) -> Dict[str, Any]:
             for key, value in kwargs.items():
                 canonical_key = self.aliases.get(key, key)
                 if canonical_key in result:
    -                raise DuplicateAlias(kwargs, self.aliases, canonical_key)
    +                raise DuplicateAliasError(kwargs, self.aliases, canonical_key)
                 result[canonical_key] = self.translate_value(value)
             return result
     
    @@ -385,7 +385,7 @@ def translate(self, value: Mapping[str, Any]) -> Dict[str, Any]:
                 return self.translate_mapping(value)
             except RuntimeError as exc:
                 if "maximum recursion depth exceeded" in str(exc):
    -                raise dbt.exceptions.RecursionException(
    +                raise dbt.exceptions.RecursionError(
                         "Cycle detected in a value passed to translate!"
                     )
                 raise
    @@ -403,7 +403,7 @@ def translate_aliases(
     
         :returns: A dict containing all the values in kwargs referenced by their
             canonical key.
    -    :raises: `AliasException`, if a canonical key is defined more than once.
    +    :raises: `AliasError`, if a canonical key is defined more than once.
         """
         translator = Translator(aliases, recurse)
         return translator.translate(kwargs)
    @@ -624,7 +624,7 @@ def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
                 time.sleep(1)
                 return _connection_exception_retry(fn, max_attempts, attempt + 1)
             else:
    -            raise ConnectionException("External connection exception occurred: " + str(exc))
    +            raise ConnectionError("External connection exception occurred: " + str(exc))
     
     
     # This is used to serialize the args in the run_results and in the logs.
    @@ -660,9 +660,10 @@ def args_to_dict(args):
                 "store_failures",
                 "use_experimental_parser",
             )
    +        default_empty_yaml_dict_keys = ("vars", "warn_error_options")
             if key in default_false_keys and var_args[key] is False:
                 continue
    -        if key == "vars" and var_args[key] == "{}":
    +        if key in default_empty_yaml_dict_keys and var_args[key] == "{}":
                 continue
             # this was required for a test case
             if isinstance(var_args[key], PosixPath) or isinstance(var_args[key], WindowsPath):
    @@ -686,3 +687,10 @@ def cast_to_int(integer: Optional[int]) -> int:
             return 0
         else:
             return integer
    +
    +
    +def cast_dict_to_dict_of_strings(dct):
    +    new_dct = {}
    +    for k, v in dct.items():
    +        new_dct[str(k)] = str(v)
    +    return new_dct
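For reference, the new helper stringifies keys as well as values, which keeps non-string keys (ints, None) safe to serialize:

    >>> cast_dict_to_dict_of_strings({"threads": 4, "quiet": True, None: "x"})
    {'threads': '4', 'quiet': 'True', 'None': 'x'}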
    diff --git a/core/dbt/version.py b/core/dbt/version.py
    index d668a902ae6..d836e2b4a43 100644
    --- a/core/dbt/version.py
    +++ b/core/dbt/version.py
    @@ -71,7 +71,7 @@ def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]:
         latest_line = ["latest", latest_s, green("Up to date!")]
     
         if installed > latest:
    -        latest_line[2] = green("Ahead of latest version!")
    +        latest_line[2] = yellow("Ahead of latest version!")
         elif installed < latest:
             latest_line[2] = yellow("Update available!")
             update_info = (
    @@ -145,7 +145,7 @@ def _get_plugin_msg_info(
             compatibility_msg = yellow("Update available!")
             needs_update = True
         elif plugin > latest_plugin:
    -        compatibility_msg = green("Ahead of latest version!")
    +        compatibility_msg = yellow("Ahead of latest version!")
         else:
             compatibility_msg = green("Up to date!")
     
    @@ -235,5 +235,5 @@ def _get_adapter_plugin_names() -> Iterator[str]:
                 yield plugin_name
     
     
    -__version__ = "1.4.0b1"
    +__version__ = "1.5.0a1"
     installed = get_installed_version()
    diff --git a/core/setup.py b/core/setup.py
    index 241a70ab6bb..b5c43cc184a 100644
    --- a/core/setup.py
    +++ b/core/setup.py
    @@ -25,7 +25,7 @@
     
     
     package_name = "dbt-core"
    -package_version = "1.4.0b1"
    +package_version = "1.5.0a1"
     description = """With dbt, data analysts and engineers can build analytics \
     the way engineers build applications."""
     
    @@ -47,14 +47,14 @@
         },
         install_requires=[
             "Jinja2==3.1.2",
    -        "agate>=1.6,<1.6.4",
    +        "agate>=1.6,<1.7.1",
             "betterproto==1.2.5",
             "click>=7.0,<9",
             "colorama>=0.3.9,<0.4.7",
             "hologram>=0.0.14,<=0.0.15",
             "isodate>=0.6,<0.7",
             "logbook>=1.5,<1.6",
    -        "mashumaro[msgpack]==3.2",
    +        "mashumaro[msgpack]==3.3.1",
             "minimal-snowplow-tracker==0.0.2",
             "networkx>=2.3,<2.8.1;python_version<'3.8'",
             "networkx>=2.3,<3;python_version>='3.8'",
    diff --git a/docker/Dockerfile b/docker/Dockerfile
    index 72332c35de9..4061e1e9746 100644
    --- a/docker/Dockerfile
    +++ b/docker/Dockerfile
    @@ -14,12 +14,12 @@ FROM --platform=$build_for python:3.10.7-slim-bullseye as base
     # N.B. The refs updated automagically every release via bumpversion
     # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct
     
    -ARG dbt_core_ref=dbt-core@v1.4.0b1
    -ARG dbt_postgres_ref=dbt-core@v1.4.0b1
    -ARG dbt_redshift_ref=dbt-redshift@v1.4.0b1
    -ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0b1
    -ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0b1
    -ARG dbt_spark_ref=dbt-spark@v1.4.0b1
    +ARG dbt_core_ref=dbt-core@v1.5.0a1
    +ARG dbt_postgres_ref=dbt-core@v1.5.0a1
    +ARG dbt_redshift_ref=dbt-redshift@v1.5.0a1
    +ARG dbt_bigquery_ref=dbt-bigquery@v1.5.0a1
    +ARG dbt_snowflake_ref=dbt-snowflake@v1.5.0a1
    +ARG dbt_spark_ref=dbt-spark@v1.5.0a1
     # special case args
     ARG dbt_spark_version=all
     ARG dbt_third_party
    diff --git a/plugins/postgres/dbt/adapters/postgres/__version__.py b/plugins/postgres/dbt/adapters/postgres/__version__.py
    index 27cfeecd9e8..219c289b1bf 100644
    --- a/plugins/postgres/dbt/adapters/postgres/__version__.py
    +++ b/plugins/postgres/dbt/adapters/postgres/__version__.py
    @@ -1 +1 @@
    -version = "1.4.0b1"
    +version = "1.5.0a1"
    diff --git a/plugins/postgres/dbt/adapters/postgres/connections.py b/plugins/postgres/dbt/adapters/postgres/connections.py
    index df24b0f9118..afa74a46339 100644
    --- a/plugins/postgres/dbt/adapters/postgres/connections.py
    +++ b/plugins/postgres/dbt/adapters/postgres/connections.py
    @@ -73,19 +73,19 @@ def exception_handler(self, sql):
                     logger.debug("Failed to release connection!")
                     pass
     
    -            raise dbt.exceptions.DatabaseException(str(e).strip()) from e
    +            raise dbt.exceptions.DbtDatabaseError(str(e).strip()) from e
     
             except Exception as e:
                 logger.debug("Error running SQL: {}", sql)
                 logger.debug("Rolling back transaction.")
                 self.rollback_if_open()
    -            if isinstance(e, dbt.exceptions.RuntimeException):
    +            if isinstance(e, dbt.exceptions.DbtRuntimeError):
                     # during a sql query, an internal to dbt exception was raised.
                     # this sounds a lot like a signal handler and probably has
                     # useful information, so raise it without modification.
                     raise
     
    -            raise dbt.exceptions.RuntimeException(e) from e
    +            raise dbt.exceptions.DbtRuntimeError(e) from e
     
         @classmethod
         def open(cls, connection):
    diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py
    index 78b86234eae..9a5d5d3f8f6 100644
    --- a/plugins/postgres/dbt/adapters/postgres/impl.py
    +++ b/plugins/postgres/dbt/adapters/postgres/impl.py
    @@ -9,11 +9,11 @@
     from dbt.adapters.postgres import PostgresRelation
     from dbt.dataclass_schema import dbtClassMixin, ValidationError
     from dbt.exceptions import (
    -    CrossDbReferenceProhibited,
    -    IndexConfigNotDict,
    -    InvalidIndexConfig,
    -    RuntimeException,
    -    UnexpectedDbReference,
    +    CrossDbReferenceProhibitedError,
    +    IndexConfigNotDictError,
    +    IndexConfigError,
    +    DbtRuntimeError,
    +    UnexpectedDbReferenceError,
     )
     import dbt.utils
     
    @@ -46,9 +46,9 @@ def parse(cls, raw_index) -> Optional["PostgresIndexConfig"]:
                 cls.validate(raw_index)
                 return cls.from_dict(raw_index)
             except ValidationError as exc:
    -            raise InvalidIndexConfig(exc)
    +            raise IndexConfigError(exc)
             except TypeError:
    -            raise IndexConfigNotDict(raw_index)
    +            raise IndexConfigNotDictError(raw_index)
     
     
     @dataclass
    @@ -74,7 +74,7 @@ def verify_database(self, database):
                 database = database.strip('"')
             expected = self.config.credentials.database
             if database.lower() != expected.lower():
    -            raise UnexpectedDbReference(self.type(), database, expected)
    +            raise UnexpectedDbReferenceError(self.type(), database, expected)
             # return an empty string on success so macros can call this
             return ""
     
    @@ -107,8 +107,8 @@ def _get_catalog_schemas(self, manifest):
             schemas = super()._get_catalog_schemas(manifest)
             try:
                 return schemas.flatten()
    -        except RuntimeException as exc:
    -            raise CrossDbReferenceProhibited(self.type(), exc.msg)
    +        except DbtRuntimeError as exc:
    +            raise CrossDbReferenceProhibitedError(self.type(), exc.msg)
     
         def _link_cached_relations(self, manifest):
             schemas: Set[str] = set()
    diff --git a/plugins/postgres/dbt/adapters/postgres/relation.py b/plugins/postgres/dbt/adapters/postgres/relation.py
    index 0f3296c1818..43c8c724a74 100644
    --- a/plugins/postgres/dbt/adapters/postgres/relation.py
    +++ b/plugins/postgres/dbt/adapters/postgres/relation.py
    @@ -1,7 +1,7 @@
     from dbt.adapters.base import Column
     from dataclasses import dataclass
     from dbt.adapters.base.relation import BaseRelation
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     
     
     @dataclass(frozen=True, eq=False, repr=False)
    @@ -14,7 +14,7 @@ def __post_init__(self):
                 and self.type is not None
                 and len(self.identifier) > self.relation_max_name_length()
             ):
    -            raise RuntimeException(
    +            raise DbtRuntimeError(
                     f"Relation name '{self.identifier}' "
                     f"is longer than {self.relation_max_name_length()} characters"
                 )
    diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py
    index 00a91759aec..ade5f95121b 100644
    --- a/plugins/postgres/setup.py
    +++ b/plugins/postgres/setup.py
    @@ -41,7 +41,7 @@ def _dbt_psycopg2_name():
     
     
     package_name = "dbt-postgres"
    -package_version = "1.4.0b1"
    +package_version = "1.5.0a1"
     description = """The postgres adapter plugin for dbt (data build tool)"""
     
     this_directory = os.path.abspath(os.path.dirname(__file__))
    diff --git a/pyproject.toml b/pyproject.toml
    index 4d9d26d4ff5..bcf52f2414c 100644
    --- a/pyproject.toml
    +++ b/pyproject.toml
    @@ -6,6 +6,6 @@ namespace_packages = true
     
     [tool.black]
     # TODO: remove global exclusion of tests when testing overhaul is complete
    -force-exclude = 'test'
    +force-exclude = 'test/'
     line-length = 99
     target-version = ['py38']
    diff --git a/scripts/env-setup.sh b/scripts/env-setup.sh
    new file mode 100644
    index 00000000000..42968b79eb1
    --- /dev/null
    +++ b/scripts/env-setup.sh
    @@ -0,0 +1,6 @@
    +#!/bin/bash
    +# Set environment variables required for integration tests
    +echo "DBT_INVOCATION_ENV=github-actions" >> $GITHUB_ENV
    +echo "DBT_TEST_USER_1=dbt_test_user_1" >> $GITHUB_ENV
    +echo "DBT_TEST_USER_2=dbt_test_user_2" >> $GITHUB_ENV
    +echo "DBT_TEST_USER_3=dbt_test_user_3" >> $GITHUB_ENV
    diff --git a/test/integration/018_adapter_ddl_tests/models/materialized.sql b/test/integration/018_adapter_ddl_tests/models/materialized.sql
    deleted file mode 100644
    index edd9c8e04bf..00000000000
    --- a/test/integration/018_adapter_ddl_tests/models/materialized.sql
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -{{
    -  config(
    -    materialized = "table",
    -    sort = 'first_name',
    -    dist = 'first_name'
    -  )
    -}}
    -
    -select * from {{ this.schema }}.seed
    diff --git a/test/integration/018_adapter_ddl_tests/seed.sql b/test/integration/018_adapter_ddl_tests/seed.sql
    deleted file mode 100644
    index 695cfbeffdf..00000000000
    --- a/test/integration/018_adapter_ddl_tests/seed.sql
    +++ /dev/null
    @@ -1,110 +0,0 @@
    -create table {schema}.seed (
    -	id BIGSERIAL PRIMARY KEY,
    -	first_name VARCHAR(50),
    -	last_name VARCHAR(50),
    -	email VARCHAR(50),
    -	gender VARCHAR(50),
    -	ip_address VARCHAR(20)
    -);
    -
    -
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jack', 'Hunter', 'jhunter0@pbs.org', 'Male', '59.80.20.168');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kathryn', 'Walker', 'kwalker1@ezinearticles.com', 'Female', '194.121.179.35');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Ryan', 'gryan2@com.com', 'Male', '11.3.212.243');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Bonnie', 'Spencer', 'bspencer3@ameblo.jp', 'Female', '216.32.196.175');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Harold', 'Taylor', 'htaylor4@people.com.cn', 'Male', '253.10.246.136');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jacqueline', 'Griffin', 'jgriffin5@t.co', 'Female', '16.13.192.220');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Wanda', 'Arnold', 'warnold6@google.nl', 'Female', '232.116.150.64');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Craig', 'Ortiz', 'cortiz7@sciencedaily.com', 'Male', '199.126.106.13');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gary', 'Day', 'gday8@nih.gov', 'Male', '35.81.68.186');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Rose', 'Wright', 'rwright9@yahoo.co.jp', 'Female', '236.82.178.100');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Raymond', 'Kelley', 'rkelleya@fc2.com', 'Male', '213.65.166.67');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Robinson', 'grobinsonb@disqus.com', 'Male', '72.232.194.193');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Mildred', 'Martinez', 'mmartinezc@samsung.com', 'Female', '198.29.112.5');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Dennis', 'Arnold', 'darnoldd@google.com', 'Male', '86.96.3.250');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Judy', 'Gray', 'jgraye@opensource.org', 'Female', '79.218.162.245');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Theresa', 'Garza', 'tgarzaf@epa.gov', 'Female', '21.59.100.54');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Robertson', 'grobertsong@csmonitor.com', 'Male', '131.134.82.96');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Philip', 'Hernandez', 'phernandezh@adobe.com', 'Male', '254.196.137.72');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Julia', 'Gonzalez', 'jgonzalezi@cam.ac.uk', 'Female', '84.240.227.174');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrew', 'Davis', 'adavisj@patch.com', 'Male', '9.255.67.25');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kimberly', 'Harper', 'kharperk@foxnews.com', 'Female', '198.208.120.253');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Mark', 'Martin', 'mmartinl@marketwatch.com', 'Male', '233.138.182.153');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Cynthia', 'Ruiz', 'cruizm@google.fr', 'Female', '18.178.187.201');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Samuel', 'Carroll', 'scarrolln@youtu.be', 'Male', '128.113.96.122');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jennifer', 'Larson', 'jlarsono@vinaora.com', 'Female', '98.234.85.95');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ashley', 'Perry', 'aperryp@rakuten.co.jp', 'Female', '247.173.114.52');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Howard', 'Rodriguez', 'hrodriguezq@shutterfly.com', 'Male', '231.188.95.26');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Amy', 'Brooks', 'abrooksr@theatlantic.com', 'Female', '141.199.174.118');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Louise', 'Warren', 'lwarrens@adobe.com', 'Female', '96.105.158.28');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Tina', 'Watson', 'twatsont@myspace.com', 'Female', '251.142.118.177');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Janice', 'Kelley', 'jkelleyu@creativecommons.org', 'Female', '239.167.34.233');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Terry', 'Mccoy', 'tmccoyv@bravesites.com', 'Male', '117.201.183.203');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jeffrey', 'Morgan', 'jmorganw@surveymonkey.com', 'Male', '78.101.78.149');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Louis', 'Harvey', 'lharveyx@sina.com.cn', 'Male', '51.50.0.167');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Philip', 'Miller', 'pmillery@samsung.com', 'Male', '103.255.222.110');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Willie', 'Marshall', 'wmarshallz@ow.ly', 'Male', '149.219.91.68');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Patrick', 'Lopez', 'plopez10@redcross.org', 'Male', '250.136.229.89');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Adam', 'Jenkins', 'ajenkins11@harvard.edu', 'Male', '7.36.112.81');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Benjamin', 'Cruz', 'bcruz12@linkedin.com', 'Male', '32.38.98.15');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Hawkins', 'rhawkins13@gmpg.org', 'Female', '135.171.129.255');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carlos', 'Barnes', 'cbarnes14@a8.net', 'Male', '240.197.85.140');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Griffin', 'rgriffin15@bravesites.com', 'Female', '19.29.135.24');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Sean', 'Mason', 'smason16@icq.com', 'Male', '159.219.155.249');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Anthony', 'Payne', 'apayne17@utexas.edu', 'Male', '235.168.199.218');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Steve', 'Cruz', 'scruz18@pcworld.com', 'Male', '238.201.81.198');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Anthony', 'Garcia', 'agarcia19@flavors.me', 'Male', '25.85.10.18');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Doris', 'Lopez', 'dlopez1a@sphinn.com', 'Female', '245.218.51.238');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Susan', 'Nichols', 'snichols1b@freewebs.com', 'Female', '199.99.9.61');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Wanda', 'Ferguson', 'wferguson1c@yahoo.co.jp', 'Female', '236.241.135.21');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrea', 'Pierce', 'apierce1d@google.co.uk', 'Female', '132.40.10.209');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Lawrence', 'Phillips', 'lphillips1e@jugem.jp', 'Male', '72.226.82.87');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Judy', 'Gilbert', 'jgilbert1f@multiply.com', 'Female', '196.250.15.142');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Eric', 'Williams', 'ewilliams1g@joomla.org', 'Male', '222.202.73.126');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ralph', 'Romero', 'rromero1h@sogou.com', 'Male', '123.184.125.212');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jean', 'Wilson', 'jwilson1i@ocn.ne.jp', 'Female', '176.106.32.194');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Lori', 'Reynolds', 'lreynolds1j@illinois.edu', 'Female', '114.181.203.22');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Moreno', 'dmoreno1k@bbc.co.uk', 'Male', '233.249.97.60');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Steven', 'Berry', 'sberry1l@eepurl.com', 'Male', '186.193.50.50');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Theresa', 'Shaw', 'tshaw1m@people.com.cn', 'Female', '120.37.71.222');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('John', 'Stephens', 'jstephens1n@nationalgeographic.com', 'Male', '191.87.127.115');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Richard', 'Jacobs', 'rjacobs1o@state.tx.us', 'Male', '66.210.83.155');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrew', 'Lawson', 'alawson1p@over-blog.com', 'Male', '54.98.36.94');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Peter', 'Morgan', 'pmorgan1q@rambler.ru', 'Male', '14.77.29.106');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Nicole', 'Garrett', 'ngarrett1r@zimbio.com', 'Female', '21.127.74.68');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Joshua', 'Kim', 'jkim1s@edublogs.org', 'Male', '57.255.207.41');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ralph', 'Roberts', 'rroberts1t@people.com.cn', 'Male', '222.143.131.109');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('George', 'Montgomery', 'gmontgomery1u@smugmug.com', 'Male', '76.75.111.77');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Alvarez', 'galvarez1v@flavors.me', 'Male', '58.157.186.194');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Olson', 'dolson1w@whitehouse.gov', 'Male', '69.65.74.135');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carlos', 'Morgan', 'cmorgan1x@pbs.org', 'Male', '96.20.140.87');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Aaron', 'Stanley', 'astanley1y@webnode.com', 'Male', '163.119.217.44');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Virginia', 'Long', 'vlong1z@spiegel.de', 'Female', '204.150.194.182');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Robert', 'Berry', 'rberry20@tripadvisor.com', 'Male', '104.19.48.241');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Antonio', 'Brooks', 'abrooks21@unesco.org', 'Male', '210.31.7.24');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Garcia', 'rgarcia22@ovh.net', 'Female', '233.218.162.214');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jack', 'Hanson', 'jhanson23@blogtalkradio.com', 'Male', '31.55.46.199');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kathryn', 'Nelson', 'knelson24@walmart.com', 'Female', '14.189.146.41');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jason', 'Reed', 'jreed25@printfriendly.com', 'Male', '141.189.89.255');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('George', 'Coleman', 'gcoleman26@people.com.cn', 'Male', '81.189.221.144');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Rose', 'King', 'rking27@ucoz.com', 'Female', '212.123.168.231');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Johnny', 'Holmes', 'jholmes28@boston.com', 'Male', '177.3.93.188');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Katherine', 'Gilbert', 'kgilbert29@altervista.org', 'Female', '199.215.169.61');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Joshua', 'Thomas', 'jthomas2a@ustream.tv', 'Male', '0.8.205.30');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Julie', 'Perry', 'jperry2b@opensource.org', 'Female', '60.116.114.192');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Richard', 'Perry', 'rperry2c@oracle.com', 'Male', '181.125.70.232');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kenneth', 'Ruiz', 'kruiz2d@wikimedia.org', 'Male', '189.105.137.109');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jose', 'Morgan', 'jmorgan2e@webnode.com', 'Male', '101.134.215.156');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Campbell', 'dcampbell2f@goo.ne.jp', 'Male', '102.120.215.84');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Debra', 'Collins', 'dcollins2g@uol.com.br', 'Female', '90.13.153.235');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jesse', 'Johnson', 'jjohnson2h@stumbleupon.com', 'Male', '225.178.125.53');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Elizabeth', 'Stone', 'estone2i@histats.com', 'Female', '123.184.126.221');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Angela', 'Rogers', 'arogers2j@goodreads.com', 'Female', '98.104.132.187');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Emily', 'Dixon', 'edixon2k@mlb.com', 'Female', '39.190.75.57');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Albert', 'Scott', 'ascott2l@tinypic.com', 'Male', '40.209.13.189');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Barbara', 'Peterson', 'bpeterson2m@ow.ly', 'Female', '75.249.136.180');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Adam', 'Greene', 'agreene2n@fastcompany.com', 'Male', '184.173.109.144');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Earl', 'Sanders', 'esanders2o@hc360.com', 'Male', '247.34.90.117');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Angela', 'Brooks', 'abrooks2p@mtv.com', 'Female', '10.63.249.126');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Harold', 'Foster', 'hfoster2q@privacy.gov.au', 'Male', '139.214.40.244');
    -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carl', 'Meyer', 'cmeyer2r@disqus.com', 'Male', '204.117.7.88');
    diff --git a/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py b/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py
    deleted file mode 100644
    index 99162efde67..00000000000
    --- a/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -class TestAdapterDDL(DBTIntegrationTest):
    -
    -    def setUp(self):
    -        DBTIntegrationTest.setUp(self)
    -
    -        self.run_sql_file("seed.sql")
    -
    -    @property
    -    def schema(self):
    -        return "adaper_ddl_018"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @use_profile('postgres')
    -    def test_sort_and_dist_keys_are_nops_on_postgres(self):
    -        results = self.run_dbt(['run'])
    -        self.assertEqual(len(results), 1)
    -
    -        self.assertTablesEqual("seed","materialized")
    diff --git a/test/integration/022_timezones_tests/models/timezones.sql b/test/integration/022_timezones_tests/models/timezones.sql
    deleted file mode 100644
    index 87d565487e1..00000000000
    --- a/test/integration/022_timezones_tests/models/timezones.sql
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -
    -{{
    -    config(
    -        materialized='table'
    -    )
    -}}
    -
    -select
    -    '{{ run_started_at.astimezone(modules.pytz.timezone("America/New_York")) }}' as run_started_at_est,
    -    '{{ run_started_at }}' as run_started_at_utc
    diff --git a/test/integration/022_timezones_tests/test_timezones.py b/test/integration/022_timezones_tests/test_timezones.py
    deleted file mode 100644
    index 993f9dcb83f..00000000000
    --- a/test/integration/022_timezones_tests/test_timezones.py
    +++ /dev/null
    @@ -1,52 +0,0 @@
    -from freezegun import freeze_time
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestTimezones(DBTIntegrationTest):
    -
    -    @property
    -    def schema(self):
    -        return "timezones_022"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def profile_config(self):
    -        return {
    -            'test': {
    -                'outputs': {
    -                    'dev': {
    -                        'type': 'postgres',
    -                        'threads': 1,
    -                        'host': self.database_host,
    -                        'port': 5432,
    -                        'user': "root",
    -                        'pass': "password",
    -                        'dbname': 'dbt',
    -                        'schema': self.unique_schema()
    -                    },
    -                },
    -                'target': 'dev'
    -            }
    -        }
    -
    -    @property
    -    def query(self):
    -        return """
    -            select
    -              run_started_at_est,
    -              run_started_at_utc
    -            from {schema}.timezones
    -        """.format(schema=self.unique_schema())
    -
    -    @freeze_time("2017-01-01 03:00:00", tz_offset=0)
    -    @use_profile('postgres')
    -    def test_postgres_run_started_at(self):
    -        results = self.run_dbt(['run'])
    -        self.assertEqual(len(results), 1)
    -        result = self.run_sql(self.query, fetch='all')[0]
    -        est, utc = result
    -        self.assertEqual(utc, '2017-01-01 03:00:00+00:00')
    -        self.assertEqual(est, '2016-12-31 22:00:00-05:00')
    diff --git a/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql b/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql
    deleted file mode 100644
    index 06dd3b0d29c..00000000000
    --- a/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -
    -select * from {{ ref('does_not_exist') }}
    diff --git a/test/integration/033_event_tracking_tests/models/example.sql b/test/integration/033_event_tracking_tests/models/example.sql
    deleted file mode 100644
    index 2cd691ea7b4..00000000000
    --- a/test/integration/033_event_tracking_tests/models/example.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -
    -select 1 as id
    diff --git a/test/integration/033_event_tracking_tests/models/example_2.sql b/test/integration/033_event_tracking_tests/models/example_2.sql
    deleted file mode 100644
    index 6e892d91c47..00000000000
    --- a/test/integration/033_event_tracking_tests/models/example_2.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -
    -select * from {{ ref('example') }}
    -union all
    -select * from {{ ref('example') }}
    diff --git a/test/integration/033_event_tracking_tests/models/model_error.sql b/test/integration/033_event_tracking_tests/models/model_error.sql
    deleted file mode 100644
    index 45c65306faf..00000000000
    --- a/test/integration/033_event_tracking_tests/models/model_error.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -
    -select * from a_thing_that_does_not_exist
    diff --git a/test/integration/033_event_tracking_tests/models/schema.yml b/test/integration/033_event_tracking_tests/models/schema.yml
    deleted file mode 100644
    index 5ac3436dc22..00000000000
    --- a/test/integration/033_event_tracking_tests/models/schema.yml
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -version: 2
    -models:
    -- name: example
    -  columns:
    -  - name: id
    -    tests:
    -    - unique
    -- name: example_2
    -  columns:
    -  - name: id
    -    tests:
    -    - unique
    diff --git a/test/integration/033_event_tracking_tests/models/snapshottable.sql b/test/integration/033_event_tracking_tests/models/snapshottable.sql
    deleted file mode 100644
    index 3c9a65a221a..00000000000
    --- a/test/integration/033_event_tracking_tests/models/snapshottable.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -
    -select
    -  1 as id,
    -  '2018-07-15T00:00:00Z'::timestamp as updated_at
    diff --git a/test/integration/033_event_tracking_tests/seeds/example_seed.csv b/test/integration/033_event_tracking_tests/seeds/example_seed.csv
    deleted file mode 100644
    index bfde6bfa0b8..00000000000
    --- a/test/integration/033_event_tracking_tests/seeds/example_seed.csv
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -a,b,c
    -1,2,3
    diff --git a/test/integration/033_event_tracking_tests/snapshots/a.sql b/test/integration/033_event_tracking_tests/snapshots/a.sql
    deleted file mode 100644
    index dd90278e560..00000000000
    --- a/test/integration/033_event_tracking_tests/snapshots/a.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -{% snapshot snapshotted %}
    -    {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}}
    -    select * from {{ schema }}.snapshottable
    -{% endsnapshot %}
    diff --git a/test/integration/033_event_tracking_tests/test_events.py b/test/integration/033_event_tracking_tests/test_events.py
    deleted file mode 100644
    index 1bcbbcec688..00000000000
    --- a/test/integration/033_event_tracking_tests/test_events.py
    +++ /dev/null
    @@ -1,986 +0,0 @@
    -# NOTE: turning off event tracking tests! [#3631](https://github.com/dbt-labs/dbt-core/issues/3631)
    -# from test.integration.base import DBTIntegrationTest, use_profile
    -# import hashlib
    -# import os
    -
    -# from unittest.mock import call, ANY, patch
    -
    -# import dbt.exceptions
    -# import dbt.version
    -# import dbt.tracking
    -# import dbt.utils
    -
    -
    -# # immutably creates a new array with the value inserted at the index
    -# def inserted(value, index, arr):
    -#     x = []
    -#     for i in range(0, len(arr)):
    -#         if i == index:
    -#             x.append(value)
    -#             x.append(arr[i])
    -#         else:
    -#             x.append(arr[i])
    -#         return x
    -
    -# class TestEventTracking(DBTIntegrationTest):
    -#     maxDiff = None
    -
    -#     @property
    -#     def profile_config(self):
    -#         return {
    -#             'config': {
    -#                 'send_anonymous_usage_stats': True
    -#             }
    -#         }
    -
    -#     @property
    -#     def schema(self):
    -#         return "event_tracking_033"
    -
    -#     @staticmethod
    -#     def dir(path):
    -#         return path.lstrip("/")
    -
    -#     @property
    -#     def models(self):
    -#         return self.dir("models")
    -
    -#     # TODO : Handle the subject. Should be the same every time!
    -#     # TODO : Regex match a uuid for user_id, invocation_id?
    -
    -#     @patch('dbt.tracking.tracker.track_struct_event')
    -#     def run_event_test(
    -#         self,
    -#         cmd,
    -#         expected_calls,
    -#         expected_contexts,
    -#         track_fn,
    -#         expect_pass=True,
    -#         expect_raise=False
    -#     ):
    -#         self.run_dbt(["deps"])
    -#         track_fn.reset_mock()
    -
    -#         project_id = hashlib.md5(
    -#             self.config.project_name.encode('utf-8')).hexdigest()
    -#         version = str(dbt.version.get_installed_version())
    -
    -#         if expect_raise:
    -#             with self.assertRaises(BaseException):
    -#                 self.run_dbt(cmd, expect_pass=expect_pass)
    -#         else:
    -#             self.run_dbt(cmd, expect_pass=expect_pass)
    -
    -#         user_id = dbt.tracking.active_user.id
    -#         invocation_id = dbt.tracking.active_user.invocation_id
    -
    -#         self.assertTrue(len(user_id) > 0)
    -#         self.assertTrue(len(invocation_id) > 0)
    -
    -#         track_fn.assert_has_calls(expected_calls)
    -
    -#         ordered_contexts = []
    -
    -#         for (args, kwargs) in track_fn.call_args_list:
    -#             ordered_contexts.append(
    -#                 [context.__dict__ for context in kwargs['context']]
    -#             )
    -
    -#         populated_contexts = []
    -
    -#         for context in expected_contexts:
    -#             if callable(context):
    -#                 populated_contexts.append(context(
    -#                     project_id, user_id, invocation_id, version))
    -#             else:
    -#                 populated_contexts.append(context)
    -
    -#         return ordered_contexts == populated_contexts
    -
    -#     def load_context(self):
    -
    -#         def populate(project_id, user_id, invocation_id, version):
    -#             return [{
    -#                 'schema': 'iglu:com.dbt/load_all_timing/jsonschema/1-0-3',
    -#                 'data': {
    -#                     'invocation_id': invocation_id,
    -#                     'project_id': project_id,
    -#                     'parsed_path_count': ANY,
    -#                     'path_count': ANY,
    -#                     'is_partial_parse_enabled': ANY,
    -#                     'is_static_analysis_enabled': ANY,
    -#                     'static_analysis_path_count': ANY,
    -#                     'static_analysis_parsed_path_count': ANY,
    -#                     'load_all_elapsed': ANY,
    -#                     'read_files_elapsed': ANY,
    -#                     'load_macros_elapsed': ANY,
    -#                     'parse_project_elapsed': ANY,
    -#                     'patch_sources_elapsed': ANY,
    -#                     'process_manifest_elapsed': ANY,
    -#                 },
    -#             }]
    -#         return populate
    -
    -#     def resource_counts_context(self):
    -#        return [
    -#           {
    -#               'schema': 'iglu:com.dbt/resource_counts/jsonschema/1-0-0',
    -#               'data': {
    -#                   'models': ANY,
    -#                   'tests': ANY,
    -#                   'snapshots': ANY,
    -#                   'analyses': ANY,
    -#                   'macros': ANY,
    -#                   'operations': ANY,
    -#                   'seeds': ANY,
    -#                   'sources': ANY,
    -#                   'exposures': ANY,
    -#               }
    -#           }
    -#        ]
    -
    -#     def build_context(
    -#         self,
    -#         command,
    -#         progress,
    -#         result_type=None,
    -#         adapter_type='postgres'
    -#     ):
    -
    -#         def populate(
    -#             project_id,
    -#             user_id,
    -#             invocation_id,
    -#             version
    -#         ):
    -#             return [
    -#                 {
    -#                     'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1',
    -#                     'data': {
    -#                         'project_id': project_id,
    -#                         'user_id': user_id,
    -#                         'invocation_id': invocation_id,
    -#                         'command': command,
    -#                         'options': None,  # TODO : Add options to compile cmd!
    -#                         'version': version,
    -
    -#                         'run_type': 'regular',
    -#                         'adapter_type': adapter_type,
    -#                         'progress': progress,
    -
    -#                         'result_type': result_type,
    -#                         'result': None,
    -#                     }
    -#                 },
    -#                 {
    -#                     'schema': 'iglu:com.dbt/platform/jsonschema/1-0-0',
    -#                     'data': ANY
    -#                 },
    -#                 {
    -#                     'schema': 'iglu:com.dbt/invocation_env/jsonschema/1-0-0',
    -#                     'data': ANY
    -#                 }
    -#             ]
    -
    -#         return populate
    -
    -#     def run_context(
    -#         self,
    -#         materialization,
    -#         hashed_contents,
    -#         model_id,
    -#         index,
    -#         total,
    -#         status,
    -#     ):
    -#         timing = []
    -#         error = False
    -
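-#         # non-ERROR runs are expected to report two timing entries (presumably compile and execute)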
    -#         if status != 'ERROR':
    -#             timing = [ANY, ANY]
    -#         else:
    -#             error = True
    -
    -#         def populate(project_id, user_id, invocation_id, version):
    -#             return [{
    -#                 'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1',
    -#                 'data': {
    -#                     'invocation_id': invocation_id,
    -
    -#                     'model_materialization': materialization,
    -
    -#                     'execution_time': ANY,
    -#                     'hashed_contents': hashed_contents,
    -#                     'model_id': model_id,
    -
    -#                     'index': index,
    -#                     'total': total,
    -
    -#                     'run_status': status,
    -#                     'run_error': error,
    -#                     'run_skipped': False,
    -
    -#                     'timing': timing,
    -#                 },
    -#             }]
    -
    -#         return populate
    -
    -
    -# class TestEventTrackingSuccess(TestEventTracking):
    -#     @property
    -#     def packages_config(self):
    -#         return {
    -#             'packages': [
    -#                 {
    -#                     'git': 'https://github.com/dbt-labs/dbt-integration-project',
    -#                     'revision': 'dbt/1.0.0',
    -#                 },
    -#             ],
    -#         }
    -
    -#     @property
    -#     def project_config(self):
    -#         return {
    -#             'config-version': 2,
    -#             "seed-paths": [self.dir("data")],
    -#             "test-paths": [self.dir("test")],
    -#             'seeds': {
    -#                 'quote_columns': False,
    -#             }
    -#         }
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_compile(self):
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
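-#         # variant B additionally tolerates an experimental_parser event inserted at position 3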
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
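-#         # one expected context per baseline (variant A) call, in call order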
    -#         expected_contexts = [
    -#             self.build_context('compile', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.build_context('compile', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["compile", "--vars", "sensitive_thing: abc"],
    -#             expected_calls_A,
    -#             expected_contexts
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["compile", "--vars", "sensitive_thing: abc"],
    -#             expected_calls_B,
    -#             expected_contexts
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_deps(self):
    -#         package_context = [
    -#             {
    -#                 'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1',
    -#                 'data': {
    -#                     'project_id': '098f6bcd4621d373cade4e832627b4f6',
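-#                     # md5('test') -- project ids appear to be hashed before tracking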
    -#                     'user_id': ANY,
    -#                     'invocation_id': ANY,
    -#                     'version': ANY,
    -#                     'command': 'deps',
    -#                     'run_type': 'regular',
    -#                     'options': None,
    -#                     'adapter_type': 'postgres'
    -#                 }
    -#             },
    -#             {
    -#                 'schema': 'iglu:com.dbt/package_install/jsonschema/1-0-0',
    -#                 'data': {
    -#                     'name': 'c5552991412d1cd86e5c20a87f3518d5',
    -#                     'source': 'git',
    -#                     'version': '6deb95629194572d44ca26c4bc25b573'
    -#                 }
    -#             }
    -#         ]
    -
    -#         expected_calls = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='package',
    -#                 label=ANY,
    -#                 property_='install',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_contexts = [
    -#             self.build_context('deps', 'start'),
    -#             package_context,
    -#             self.build_context('deps', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result = self.run_event_test(["deps"], expected_calls, expected_contexts)
    -#         self.assertTrue(test_result)
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_seed(self):
    -#         def seed_context(project_id, user_id, invocation_id, version):
    -#             return [{
    -#                 'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1',
    -#                 'data': {
    -#                     'invocation_id': invocation_id,
    -
    -#                     'model_materialization': 'seed',
    -
    -#                     'execution_time': ANY,
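-#                     # d41d8cd98f00b204e9800998ecf8427e below is md5(''), i.e. the seed contents hash as empty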
    -#                     'hashed_contents': 'd41d8cd98f00b204e9800998ecf8427e',
    -#                     'model_id': '39bc2cd707d99bd3e600d2faaafad7ae',
    -
    -#                     'index': 1,
    -#                     'total': 1,
    -
    -#                     'run_status': 'SUCCESS',
    -#                     'run_error': False,
    -#                     'run_skipped': False,
    -
    -#                     'timing': [ANY, ANY],
    -#                 },
    -#             }]
    -
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='run_model',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
    -#         expected_contexts = [
    -#             self.build_context('seed', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             seed_context,
    -#             self.build_context('seed', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(["seed"], expected_calls_A, expected_contexts)
    -#         test_result_B = self.run_event_test(["seed"], expected_calls_B, expected_contexts)
-
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_models(self):
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='run_model',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='run_model',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
    -#         hashed = '20ff78afb16c8b3b8f83861b1d3b99bd'
    -#         # this hashed contents field changes on azure postgres tests, I believe
    -#         # due to newlines again
    -#         if os.name == 'nt':
    -#             hashed = '52cf9d1db8f0a18ca64ef64681399746'
    -
    -#         expected_contexts = [
    -#             self.build_context('run', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.run_context(
    -#                 hashed_contents='1e5789d34cddfbd5da47d7713aa9191c',
    -#                 model_id='4fbacae0e1b69924b22964b457148fb8',
    -#                 index=1,
    -#                 total=2,
    -#                 status='SUCCESS',
    -#                 materialization='view'
    -#             ),
    -#             self.run_context(
    -#                 hashed_contents=hashed,
    -#                 model_id='57994a805249953b31b738b1af7a1eeb',
    -#                 index=2,
    -#                 total=2,
    -#                 status='SUCCESS',
    -#                 materialization='view'
    -#             ),
    -#             self.build_context('run', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["run", "--model", "example", "example_2"],
    -#             expected_calls_A,
    -#             expected_contexts
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["run", "--model", "example", "example_2"],
-#             expected_calls_B,
    -#             expected_contexts
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_model_error(self):
    -#         # cmd = ["run", "--model", "model_error"]
    -#         # self.run_event_test(cmd, event_run_model_error, expect_pass=False)
    -
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='run_model',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
    -#         expected_contexts = [
    -#             self.build_context('run', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.run_context(
    -#                 hashed_contents='4419e809ce0995d99026299e54266037',
    -#                 model_id='576c3d4489593f00fad42b97c278641e',
    -#                 index=1,
    -#                 total=1,
    -#                 status='ERROR',
    -#                 materialization='view'
    -#             ),
    -#             self.build_context('run', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["run", "--model", "model_error"],
    -#             expected_calls_A,
    -#             expected_contexts,
    -#             expect_pass=False
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["run", "--model", "model_error"],
    -#             expected_calls_B,
    -#             expected_contexts,
    -#             expect_pass=False
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_tests(self):
    -#         # TODO: dbt does not track events for tests, but it should!
    -#         self.run_dbt(["deps"])
    -#         self.run_dbt(["run", "--model", "example", "example_2"])
    -
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
    -#         expected_contexts = [
    -#             self.build_context('test', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.build_context('test', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["test"],
    -#             expected_calls_A,
    -#             expected_contexts,
    -#             expect_pass=False
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["test"],
-#             expected_calls_B,
    -#             expected_contexts,
    -#             expect_pass=False
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -
    -# class TestEventTrackingCompilationError(TestEventTracking):
    -#     @property
    -#     def project_config(self):
    -#         return {
    -#             'config-version': 2,
    -#             "model-paths": [self.dir("model-compilation-error")],
    -#         }
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_with_compilation_error(self):
    -#         expected_calls = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_contexts = [
    -#             self.build_context('compile', 'start'),
    -#             self.build_context('compile', 'end', result_type='error')
    -#         ]
    -
    -#         test_result = self.run_event_test(
    -#             ["compile"],
    -#             expected_calls,
    -#             expected_contexts,
    -#             expect_pass=False,
    -#             expect_raise=True
    -#         )
    -
    -#         self.assertTrue(test_result)
    -
    -
    -# class TestEventTrackingUnableToConnect(TestEventTracking):
    -
    -#     @property
    -#     def profile_config(self):
    -#         return {
    -#             'config': {
    -#                 'send_anonymous_usage_stats': True
    -#             },
    -#             'test': {
    -#                 'outputs': {
    -#                     'default2': {
    -#                         'type': 'postgres',
    -#                         'threads': 4,
    -#                         'host': self.database_host,
    -#                         'port': 5432,
    -#                         'user': 'root',
    -#                         'pass': 'password',
    -#                         'dbname': 'dbt',
    -#                         'schema': self.unique_schema()
    -#                     },
    -#                     'noaccess': {
    -#                         'type': 'postgres',
    -#                         'threads': 4,
    -#                         'host': self.database_host,
    -#                         'port': 5432,
    -#                         'user': 'BAD',
    -#                         'pass': 'bad_password',
    -#                         'dbname': 'dbt',
    -#                         'schema': self.unique_schema()
    -#                     }
    -#                 },
    -#                 'target': 'default2'
    -#             }
    -#         }
    -
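-#     # the 'noaccess' target above uses bad credentials, so runs against it fail at connection time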
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_unable_to_connect(self):
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
    -#         expected_contexts = [
    -#             self.build_context('run', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.build_context('run', 'end', result_type='error')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["run", "--target", "noaccess", "--models", "example"],
    -#             expected_calls_A,
    -#             expected_contexts,
    -#             expect_pass=False
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["run", "--target", "noaccess", "--models", "example"],
    -#             expected_calls_B,
    -#             expected_contexts,
    -#             expect_pass=False
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -
    -# class TestEventTrackingSnapshot(TestEventTracking):
    -#     @property
    -#     def project_config(self):
    -#         return {
    -#             'config-version': 2,
    -#             "snapshot-paths": ['snapshots']
    -#         }
    -
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_snapshot(self):
    -#         self.run_dbt(["run", "--models", "snapshottable"])
    -
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='run_model',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
-#         # the snapshot's raw_code contains the schema name, which varies per run, so hashed_contents is ANY
    -#         expected_contexts = [
    -#             self.build_context('snapshot', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.run_context(
    -#                 hashed_contents=ANY,
    -#                 model_id='820793a4def8d8a38d109a9709374849',
    -#                 index=1,
    -#                 total=1,
    -#                 status='SUCCESS',
    -#                 materialization='snapshot'
    -#             ),
    -#             self.build_context('snapshot', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["snapshot"],
    -#             expected_calls_A,
    -#             expected_contexts
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["snapshot"],
    -#             expected_calls_B,
    -#             expected_contexts
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    -
    -
    -# class TestEventTrackingCatalogGenerate(TestEventTracking):
    -#     @use_profile("postgres")
    -#     def test__postgres_event_tracking_catalog_generate(self):
    -#         # create a model for the catalog
    -#         self.run_dbt(["run", "--models", "example"])
    -
    -#         expected_calls_A = [
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='start',
    -#                 context=ANY
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='load_project',
    -#                 label=ANY,
    -#                 context=ANY,
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='resource_counts',
    -#                 label=ANY,
    -#                 context=ANY,
    -#             ),
    -#             call(
    -#                 category='dbt',
    -#                 action='invocation',
    -#                 label='end',
    -#                 context=ANY
    -#             ),
    -#         ]
    -
    -#         expected_calls_B = inserted(
    -#             call(
    -#                 category='dbt',
    -#                 action='experimental_parser',
    -#                 label=ANY,
    -#                 context=ANY
    -#             ),
    -#             3,
    -#             expected_calls_A
    -#         )
    -
    -#         expected_contexts = [
    -#             self.build_context('generate', 'start'),
    -#             self.load_context(),
    -#             self.resource_counts_context(),
    -#             self.build_context('generate', 'end', result_type='ok')
    -#         ]
    -
    -#         test_result_A = self.run_event_test(
    -#             ["docs", "generate"],
    -#             expected_calls_A,
    -#             expected_contexts
    -#         )
    -
    -#         test_result_B = self.run_event_test(
    -#             ["docs", "generate"],
    -#             expected_calls_B,
    -#             expected_contexts
    -#         )
    -
    -#         self.assertTrue(test_result_A or test_result_B)
    diff --git a/test/integration/035_docs_blocks_tests/test_docs_blocks.py b/test/integration/035_docs_blocks_tests/test_docs_blocks.py
    deleted file mode 100644
    index dacddf394f9..00000000000
    --- a/test/integration/035_docs_blocks_tests/test_docs_blocks.py
    +++ /dev/null
    @@ -1,184 +0,0 @@
    -import json
    -import os
    -
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -import dbt.exceptions
    -
    -class TestGoodDocsBlocks(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return 'docs_blocks_035'
    -
    -    @staticmethod
    -    def dir(path):
    -        return os.path.normpath(path)
    -
    -    @property
    -    def models(self):
    -        return self.dir("models")
    -
    -    @use_profile('postgres')
    -    def test_postgres_valid_doc_ref(self):
    -        self.assertEqual(len(self.run_dbt()), 1)
    -
    -        self.assertTrue(os.path.exists('./target/manifest.json'))
    -
    -        with open('./target/manifest.json') as fp:
    -            manifest = json.load(fp)
    -
    -        model_data = manifest['nodes']['model.test.model']
    -        self.assertEqual(
    -            model_data['description'],
    -            'My model is just a copy of the seed'
    -        )
    -        self.assertEqual(
    -            {
    -                'name': 'id',
    -                'description': 'The user ID number',
    -                'data_type': None,
    -                'meta': {},
    -                'quote': None,
    -                'tags': [],
    -            },
    -            model_data['columns']['id']
    -        )
    -        self.assertEqual(
    -            {
    -                'name': 'first_name',
    -                'description': "The user's first name",
    -                'data_type': None,
    -                'meta': {},
    -                'quote': None,
    -                'tags': [],
    -            },
    -            model_data['columns']['first_name']
    -        )
    -
    -        self.assertEqual(
    -            {
    -                'name': 'last_name',
    -                'description': "The user's last name",
    -                'data_type': None,
    -                'meta': {},
    -                'quote': None,
    -                'tags': [],
    -            },
    -            model_data['columns']['last_name']
    -        )
    -        self.assertEqual(len(model_data['columns']), 3)
    -
    -    @use_profile('postgres')
    -    def test_postgres_alternative_docs_path(self):
    -        self.use_default_project({"docs-paths": [self.dir("docs")]})
    -        self.assertEqual(len(self.run_dbt()), 1)
    -
    -        self.assertTrue(os.path.exists('./target/manifest.json'))
    -
    -        with open('./target/manifest.json') as fp:
    -            manifest = json.load(fp)
    -
    -        model_data = manifest['nodes']['model.test.model']
    -        self.assertEqual(
    -            model_data['description'],
    -            'Alt text about the model'
    -        )
    -        self.assertEqual(
    -            {
    -                'name': 'id',
    -                'description': 'The user ID number with alternative text',
    -                'data_type': None,
    -                'meta': {},
    -                'quote': None,
    -                'tags': [],
    -            },
    -            model_data['columns']['id']
    -        )
    -        self.assertEqual(
    -            {
    -                'name': 'first_name',
    -                'description': "The user's first name",
    -                'data_type': None,
    -                'meta': {},
    -                'quote': None,
    -                'tags': [],
    -            },
    -            model_data['columns']['first_name']
    -        )
    -
    -        self.assertEqual(
    -            {
    -                'name': 'last_name',
    -                'description': "The user's last name in this other file",
    -                'data_type': None,
    -                'meta': {},
    -                'quote': None,
    -                'tags': [],
    -            },
    -            model_data['columns']['last_name']
    -        )
    -        self.assertEqual(len(model_data['columns']), 3)
    -
    -    @use_profile('postgres')
    -    def test_postgres_alternative_docs_path_missing(self):
    -        self.use_default_project({"docs-paths": [self.dir("not-docs")]})
    -        with self.assertRaises(dbt.exceptions.CompilationException):
    -            self.run_dbt()
    -
    -
    -class TestMissingDocsBlocks(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return 'docs_blocks_035'
    -
    -    @staticmethod
    -    def dir(path):
    -        return os.path.normpath(path)
    -
    -    @property
    -    def models(self):
    -        return self.dir("missing_docs_models")
    -
    -    @use_profile('postgres')
    -    def test_postgres_missing_doc_ref(self):
    -        # The run should fail since we could not find the docs reference.
    -        with self.assertRaises(dbt.exceptions.CompilationException):
    -            self.run_dbt()
    -
    -
    -class TestBadDocsBlocks(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return 'docs_blocks_035'
    -
    -    @staticmethod
    -    def dir(path):
    -        return os.path.normpath(path)
    -
    -    @property
    -    def models(self):
    -        return self.dir("invalid_name_models")
    -
    -    @use_profile('postgres')
    -    def test_postgres_invalid_doc_ref(self):
-        # The run should fail since the docs reference uses an invalid name.
    -        with self.assertRaises(dbt.exceptions.CompilationException):
    -            self.run_dbt(expect_pass=False)
-
-
    -class TestDuplicateDocsBlock(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return 'docs_blocks_035'
    -
    -    @staticmethod
    -    def dir(path):
    -        return os.path.normpath(path)
    -
    -    @property
    -    def models(self):
    -        return self.dir("duplicate_docs")
    -
    -    @use_profile('postgres')
    -    def test_postgres_duplicate_doc_ref(self):
    -        with self.assertRaises(dbt.exceptions.CompilationException):
    -            self.run_dbt(expect_pass=False)
    diff --git a/test/integration/037_external_reference_tests/models/my_model.sql b/test/integration/037_external_reference_tests/models/my_model.sql
    deleted file mode 100644
    index 5d10e607ed7..00000000000
    --- a/test/integration/037_external_reference_tests/models/my_model.sql
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{{
    -  config(
    -    materialized = "view"
    -  )
    -}}
    -
    -select * from "{{ this.schema + 'z' }}"."external"
    diff --git a/test/integration/037_external_reference_tests/standalone_models/my_model.sql b/test/integration/037_external_reference_tests/standalone_models/my_model.sql
    deleted file mode 100644
    index 2cd691ea7b4..00000000000
    --- a/test/integration/037_external_reference_tests/standalone_models/my_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -
    -select 1 as id
    diff --git a/test/integration/037_external_reference_tests/test_external_reference.py b/test/integration/037_external_reference_tests/test_external_reference.py
    deleted file mode 100644
    index d5a7e129e3a..00000000000
    --- a/test/integration/037_external_reference_tests/test_external_reference.py
    +++ /dev/null
    @@ -1,78 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -class TestExternalReference(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "external_reference_037"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    def setUp(self):
    -        super().setUp()
    -        self.use_default_project()
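-        # the external schema created here matches the model's "{{ this.schema + 'z' }}" reference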
    -        self.external_schema = self.unique_schema()+'z'
    -        self.run_sql(
    -            'create schema "{}"'.format(self.external_schema)
    -        )
    -        self.run_sql(
    -            'create table "{}"."external" (id integer)'
    -            .format(self.external_schema)
    -        )
    -        self.run_sql(
    -            'insert into "{}"."external" values (1), (2)'
    -            .format(self.external_schema)
    -        )
    -
    -    def tearDown(self):
    -        # This has to happen before we drop the external schema, because
    -        # otherwise postgres hangs forever.
    -        self._drop_schemas()
    -        with self.get_connection():
    -            self._drop_schema_named(self.default_database, self.external_schema)
    -        super().tearDown()
    -
    -    @use_profile('postgres')
    -    def test__postgres__external_reference(self):
    -        self.assertEqual(len(self.run_dbt()), 1)
    -        # running it again should succeed
    -        self.assertEqual(len(self.run_dbt()), 1)
    -
    -
    -# The opposite of the test above -- check that external relations that
    -# depend on a dbt model do not create issues with caching
    -class TestExternalDependency(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "external_dependency_037"
    -
    -    @property
    -    def models(self):
    -        return "standalone_models"
    -
    -    def tearDown(self):
    -        # This has to happen before we drop the external schema, because
    -        # otherwise postgres hangs forever.
    -        self._drop_schemas()
    -        with self.get_connection():
    -            self._drop_schema_named(self.default_database, self.external_schema)
    -        super().tearDown()
    -
    -    @use_profile('postgres')
-    def test__postgres__external_dependency(self):
    -        self.assertEqual(len(self.run_dbt()), 1)
    -
    -        # create a view outside of the dbt schema that depends on this model
    -        self.external_schema = self.unique_schema()+'zz'
    -        self.run_sql(
    -            'create schema "{}"'.format(self.external_schema)
    -        )
    -        self.run_sql(
    -            'create view "{}"."external" as (select * from {}.my_model)'
    -            .format(self.external_schema, self.unique_schema())
    -        )
    -
    -        # running it again should succeed
    -        self.assertEqual(len(self.run_dbt()), 1)
    -
    diff --git a/test/integration/038_caching_tests/test_caching.py b/test/integration/038_caching_tests/test_caching.py
    deleted file mode 100644
    index 1967e912628..00000000000
    --- a/test/integration/038_caching_tests/test_caching.py
    +++ /dev/null
    @@ -1,67 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -from dbt.adapters.factory import FACTORY
    -
    -class TestBaseCaching(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "caching_038"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'quoting': {
    -                'identifier': False,
    -                'schema': False,
    -            }
    -        }
    -
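-    # quoting is disabled above, so cached relation schemas fold to lowercase (asserted in cache_run)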
    -    def run_and_get_adapter(self):
    -        # we want to inspect the adapter that dbt used for the run, which is
    -        # not self.adapter. You can't do this until after you've run dbt once.
    -        self.run_dbt(['run'])
    -        return FACTORY.adapters[self.adapter_type]
    -
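-    # run twice and check that the relation cache holds a single, stable entry across runs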
    -    def cache_run(self):
    -        adapter = self.run_and_get_adapter()
    -        self.assertEqual(len(adapter.cache.relations), 1)
    -        relation = next(iter(adapter.cache.relations.values()))
    -        self.assertEqual(relation.inner.schema, self.unique_schema())
    -        self.assertEqual(relation.schema, self.unique_schema().lower())
    -
    -        self.run_dbt(['run'])
    -        self.assertEqual(len(adapter.cache.relations), 1)
    -        second_relation = next(iter(adapter.cache.relations.values()))
    -        self.assertEqual(relation, second_relation)
    -
    -class TestCachingLowercaseModel(TestBaseCaching):
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @use_profile('postgres')
    -    def test_postgres_cache(self):
    -        self.cache_run()
    -
    -class TestCachingUppercaseModel(TestBaseCaching):
    -    @property
    -    def models(self):
    -        return "shouting_models"
    -
    -    @use_profile('postgres')
    -    def test_postgres_cache(self):
    -        self.cache_run()
    -
    -class TestCachingSelectedSchemaOnly(TestBaseCaching):
    -    @property
    -    def models(self):
    -        return "models_multi_schemas"
-
    -    def run_and_get_adapter(self):
    -        # select only the 'model' in the default schema
    -        self.run_dbt(['--cache-selected-only', 'run', '--select', 'model'])
    -        return FACTORY.adapters[self.adapter_type]
    -
    -    @use_profile('postgres')
    -    def test_postgres_cache(self):
    -        self.cache_run()
    diff --git a/test/integration/040_init_tests/test_init.py b/test/integration/040_init_tests/test_init.py
    deleted file mode 100644
    index 6a814fa7794..00000000000
    --- a/test/integration/040_init_tests/test_init.py
    +++ /dev/null
    @@ -1,755 +0,0 @@
    -import os
    -import shutil
    -from unittest import mock
    -from unittest.mock import Mock, call
    -from pathlib import Path
    -
    -import click
    -
    -from test.integration.base import DBTIntegrationTest, use_profile
    -from pytest import mark
    -
    -class TestInit(DBTIntegrationTest):
    -    def tearDown(self):
    -        project_name = self.get_project_name()
    -
    -        if os.path.exists(project_name):
    -            shutil.rmtree(project_name)
    -
    -        super().tearDown()
    -
    -    def get_project_name(self):
    -        return 'my_project_{}'.format(self.unique_schema())
    -
    -    @property
    -    def schema(self):
    -        return 'init_040'
    -
    -    @property
    -    def models(self):
    -        return 'models'
    -
    -    # See CT-570 / GH 5180
    -    @mark.skip(
    -      reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    -    )
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_task_in_project_with_existing_profiles_yml(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -        manager.confirm.side_effect = ["y"]
    -        manager.prompt.side_effect = [
    -            1,
    -            'localhost',
    -            5432,
    -            'test_user',
    -            'test_password',
    -            'test_db',
    -            'test_schema',
    -            4,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
    -        self.run_dbt(['init'])
    -
    -        manager.assert_has_calls([
    -            call.confirm(f"The profile test already exists in {os.path.join(self.test_root_dir, 'profiles.yml')}. Continue and overwrite it?"),
    -            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
    -            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
    -            call.prompt('port', default=5432, hide_input=False, type=click.INT),
    -            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
    -            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
    -            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT),
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == """config:
    -  send_anonymous_usage_stats: false
    -test:
    -  outputs:
    -    dev:
    -      dbname: test_db
    -      host: localhost
    -      pass: test_password
    -      port: 5432
    -      schema: test_schema
    -      threads: 4
    -      type: postgres
    -      user: test_user
    -  target: dev
    -"""
    -
-    # See CT-570 / GH 5180
    -    @mark.skip(
    -      reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    -    )
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    @mock.patch.object(Path, 'exists', autospec=True)
    -    def test_postgres_init_task_in_project_without_existing_profiles_yml(self, exists, mock_prompt, mock_confirm, mock_get_adapter):
    -
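-        # Path.exists is patched with autospec=True, so each call receives the Path instance as `path`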
    -        def exists_side_effect(path):
-            # Override responses on specific files, default to 'real world' if not overridden
    -            return {
    -                'profiles.yml': False
    -            }.get(path.name, os.path.exists(path))
    -
    -        exists.side_effect = exists_side_effect
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.prompt.side_effect = [
    -            1,
    -            'localhost',
    -            5432,
    -            'test_user',
    -            'test_password',
    -            'test_db',
    -            'test_schema',
    -            4,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
    -        self.run_dbt(['init'])
    -
    -        manager.assert_has_calls([
    -            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
    -            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
    -            call.prompt('port', default=5432, hide_input=False, type=click.INT),
    -            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
    -            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
    -            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT)
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == """test:
    -  outputs:
    -    dev:
    -      dbname: test_db
    -      host: localhost
    -      pass: test_password
    -      port: 5432
    -      schema: test_schema
    -      threads: 4
    -      type: postgres
    -      user: test_user
    -  target: dev
    -"""
    -
    -    # See CT-570 / GH 5180
    -    @mark.skip(
    -      reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    -    )
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    @mock.patch.object(Path, 'exists', autospec=True)
    -    def test_postgres_init_task_in_project_without_existing_profiles_yml_or_profile_template(self, exists, mock_prompt, mock_confirm, mock_get_adapter):
    -
    -        def exists_side_effect(path):
-            # Override responses on specific files, default to 'real world' if not overridden
    -            return {
    -                'profiles.yml': False,
    -                'profile_template.yml': False,
    -            }.get(path.name, os.path.exists(path))
    -
    -        exists.side_effect = exists_side_effect
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -        manager.prompt.side_effect = [
    -            1,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -        self.run_dbt(['init'])
    -        manager.assert_has_calls([
    -            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == """test:
    -  outputs:
    -
    -    dev:
    -      type: postgres
    -      threads: [1 or more]
    -      host: [host]
    -      port: [port]
    -      user: [dev_username]
    -      pass: [dev_password]
    -      dbname: [dbname]
    -      schema: [dev_schema]
    -
    -    prod:
    -      type: postgres
    -      threads: [1 or more]
    -      host: [host]
    -      port: [port]
    -      user: [prod_username]
    -      pass: [prod_password]
    -      dbname: [dbname]
    -      schema: [prod_schema]
    -
    -  target: dev
    -"""
    -
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    @mock.patch.object(Path, 'exists', autospec=True)
    -    def test_postgres_init_task_in_project_with_profile_template_without_existing_profiles_yml(self, exists, mock_prompt, mock_confirm, mock_get_adapter):
    -
    -        def exists_side_effect(path):
-            # Override responses on specific files, default to 'real world' if not overridden
    -            return {
    -                'profiles.yml': False,
    -            }.get(path.name, os.path.exists(path))
    -        exists.side_effect = exists_side_effect
    -
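-        # write a project-local profile_template.yml: 'fixed' values are copied into the profile,
-        # while 'prompts' entries drive the click.prompt calls asserted below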
    -        with open("profile_template.yml", 'w') as f:
    -            f.write("""fixed:
    -  type: postgres
    -  threads: 4
    -  host: localhost
    -  dbname: my_db
    -  schema: my_schema
    -  target: my_target
    -prompts:
    -  target:
    -    hint: 'The target name'
    -    type: string
    -  port:
    -    hint: 'The port (for integer test purposes)'
    -    type: int
    -    default: 5432
    -  user:
    -    hint: 'Your username'
    -  pass:
    -    hint: 'Your password'
    -    hide_input: true""")
    -
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -        manager.prompt.side_effect = [
    -            'my_target',
    -            5432,
    -            'test_username',
    -            'test_password'
    -        ]
    -        mock_get_adapter.return_value = [1]
    -        self.run_dbt(['init'])
    -        manager.assert_has_calls([
    -            call.prompt('target (The target name)', default=None, hide_input=False, type=click.STRING),
    -            call.prompt('port (The port (for integer test purposes))', default=5432, hide_input=False, type=click.INT),
    -            call.prompt('user (Your username)', default=None, hide_input=False, type=None),
    -            call.prompt('pass (Your password)', default=None, hide_input=True, type=None)
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == """test:
    -  outputs:
    -    my_target:
    -      dbname: my_db
    -      host: localhost
    -      pass: test_password
    -      port: 5432
    -      schema: my_schema
    -      threads: 4
    -      type: postgres
    -      user: test_username
    -  target: my_target
    -"""
-
-    # See CT-570 / GH 5180
    -    @mark.skip(
    -      reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    -    )
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_task_in_project_with_invalid_profile_template(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        """Test that when an invalid profile_template.yml is provided in the project,
    -        init command falls back to the target's profile_template.yml"""
    -
    -        with open("profile_template.yml", 'w') as f:
    -            f.write("""invalid template""")
    -
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -        manager.confirm.side_effect = ["y"]
    -        manager.prompt.side_effect = [
    -            1,
    -            'localhost',
    -            5432,
    -            'test_username',
    -            'test_password',
    -            'test_db',
    -            'test_schema',
    -            4,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
    -        self.run_dbt(['init'])
    -
    -        manager.assert_has_calls([
    -            call.confirm(f"The profile test already exists in {os.path.join(self.test_root_dir, 'profiles.yml')}. Continue and overwrite it?"),
    -            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
    -            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
    -            call.prompt('port', default=5432, hide_input=False, type=click.INT),
    -            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
    -            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
    -            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT)
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == """config:
    -  send_anonymous_usage_stats: false
    -test:
    -  outputs:
    -    dev:
    -      dbname: test_db
    -      host: localhost
    -      pass: test_password
    -      port: 5432
    -      schema: test_schema
    -      threads: 4
    -      type: postgres
    -      user: test_username
    -  target: dev
    -"""
-
-    # See CT-570 / GH 5180
    -    @mark.skip(
    -      reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    -    )
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_task_outside_of_project(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -
    -        # Start by removing the dbt_project.yml so that we're not in an existing project
    -        os.remove('dbt_project.yml')
    -
    -        project_name = self.get_project_name()
    -        manager.prompt.side_effect = [
    -            project_name,
    -            1,
    -            'localhost',
    -            5432,
    -            'test_username',
    -            'test_password',
    -            'test_db',
    -            'test_schema',
    -            4,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -        self.run_dbt(['init'])
    -        manager.assert_has_calls([
    -            call.prompt("Enter a name for your project (letters, digits, underscore)"),
    -            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
    -            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
    -            call.prompt('port', default=5432, hide_input=False, type=click.INT),
    -            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
    -            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
    -            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT),
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == f"""config:
    -  send_anonymous_usage_stats: false
    -{project_name}:
    -  outputs:
    -    dev:
    -      dbname: test_db
    -      host: localhost
    -      pass: test_password
    -      port: 5432
    -      schema: test_schema
    -      threads: 4
    -      type: postgres
    -      user: test_username
    -  target: dev
    -test:
    -  outputs:
    -    default2:
    -      dbname: dbt
    -      host: localhost
    -      pass: password
    -      port: 5432
    -      schema: {self.unique_schema()}
    -      threads: 4
    -      type: postgres
    -      user: root
    -    noaccess:
    -      dbname: dbt
    -      host: localhost
    -      pass: password
    -      port: 5432
    -      schema: {self.unique_schema()}
    -      threads: 4
    -      type: postgres
    -      user: noaccess
    -  target: default2
    -"""
    -
    -        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
    -            assert f.read() == f"""
    -# Name your project! Project names should contain only lowercase characters
    -# and underscores. A good package name should reflect your organization's
    -# name or the intended use of these models
    -name: '{project_name}'
    -version: '1.0.0'
    -config-version: 2
    -
    -# This setting configures which "profile" dbt uses for this project.
    -profile: '{project_name}'
    -
    -# These configurations specify where dbt should look for different types of files.
    -# The `model-paths` config, for example, states that models in this project can be
    -# found in the "models/" directory. You probably won't need to change these!
    -model-paths: ["models"]
    -analysis-paths: ["analyses"]
    -test-paths: ["tests"]
    -seed-paths: ["seeds"]
    -macro-paths: ["macros"]
    -snapshot-paths: ["snapshots"]
    -
    -target-path: "target"  # directory which will store compiled SQL files
    -clean-targets:         # directories to be removed by `dbt clean`
    -  - "target"
    -  - "dbt_packages"
    -
    -
    -# Configuring models
    -# Full documentation: https://docs.getdbt.com/docs/configuring-models
    -
    -# In this example config, we tell dbt to build all models in the example/
    -# directory as views. These settings can be overridden in the individual model
    -# files using the `{{{{ config(...) }}}}` macro.
    -models:
    -  {project_name}:
    -    # Config indicated by + and applies to all files under models/example/
    -    example:
    -      +materialized: view
    -"""
-
-    # See CT-570 / GH 5180
    -    @mark.skip(
    -      reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    -    )
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_with_provided_project_name(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -
    -        # Start by removing the dbt_project.yml so that we're not in an existing project
    -        os.remove('dbt_project.yml')
    -
    -        manager.prompt.side_effect = [
    -            1,
    -            'localhost',
    -            5432,
    -            'test_username',
    -            'test_password',
    -            'test_db',
    -            'test_schema',
    -            4,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
    -        # Provide project name through the init command.
    -        project_name = self.get_project_name()
    -        self.run_dbt(['init', project_name])
    -        manager.assert_has_calls([
    -            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
    -            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
    -            call.prompt('port', default=5432, hide_input=False, type=click.INT),
    -            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
    -            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
    -            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
    -            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT),
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
    -            assert f.read() == f"""config:
    -  send_anonymous_usage_stats: false
    -{project_name}:
    -  outputs:
    -    dev:
    -      dbname: test_db
    -      host: localhost
    -      pass: test_password
    -      port: 5432
    -      schema: test_schema
    -      threads: 4
    -      type: postgres
    -      user: test_username
    -  target: dev
    -test:
    -  outputs:
    -    default2:
    -      dbname: dbt
    -      host: localhost
    -      pass: password
    -      port: 5432
    -      schema: {self.unique_schema()}
    -      threads: 4
    -      type: postgres
    -      user: root
    -    noaccess:
    -      dbname: dbt
    -      host: localhost
    -      pass: password
    -      port: 5432
    -      schema: {self.unique_schema()}
    -      threads: 4
    -      type: postgres
    -      user: noaccess
    -  target: default2
    -"""
    -
    -        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
    -            assert f.read() == f"""
    -# Name your project! Project names should contain only lowercase characters
    -# and underscores. A good package name should reflect your organization's
    -# name or the intended use of these models
    -name: '{project_name}'
    -version: '1.0.0'
    -config-version: 2
    -
    -# This setting configures which "profile" dbt uses for this project.
    -profile: '{project_name}'
    -
    -# These configurations specify where dbt should look for different types of files.
    -# The `model-paths` config, for example, states that models in this project can be
    -# found in the "models/" directory. You probably won't need to change these!
    -model-paths: ["models"]
    -analysis-paths: ["analyses"]
    -test-paths: ["tests"]
    -seed-paths: ["seeds"]
    -macro-paths: ["macros"]
    -snapshot-paths: ["snapshots"]
    -
    -target-path: "target"  # directory which will store compiled SQL files
    -clean-targets:         # directories to be removed by `dbt clean`
    -  - "target"
    -  - "dbt_packages"
    -
    -
    -# Configuring models
    -# Full documentation: https://docs.getdbt.com/docs/configuring-models
    -
    -# In this example config, we tell dbt to build all models in the example/
    -# directory as views. These settings can be overridden in the individual model
    -# files using the `{{{{ config(...) }}}}` macro.
    -models:
    -  {project_name}:
    -    # Config indicated by + and applies to all files under models/example/
    -    example:
    -      +materialized: view
    -"""
    -
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_invalid_project_name_cli(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -
    -        os.remove('dbt_project.yml')
    -        invalid_name = 'name-with-hyphen'
    -        valid_name = self.get_project_name()
    -        manager.prompt.side_effect = [
    -            valid_name
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
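-        # init receives an invalid (hyphenated) name on the CLI, so it should
-        # fall back to prompting until a valid project name is supplied.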
    -        self.run_dbt(['init', invalid_name, '-s'])
    -        manager.assert_has_calls([
    -            call.prompt("Enter a name for your project (letters, digits, underscore)"),
    -        ])
    -
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_invalid_project_name_prompt(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -
    -        os.remove('dbt_project.yml')
    -
    -        invalid_name = 'name-with-hyphen'
    -        valid_name = self.get_project_name()
    -        manager.prompt.side_effect = [
    -            invalid_name, valid_name
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
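-        # The first prompt answer is invalid, so init should prompt again.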
    -        self.run_dbt(['init', '-s'])
    -        manager.assert_has_calls([
    -            call.prompt("Enter a name for your project (letters, digits, underscore)"),
    -            call.prompt("Enter a name for your project (letters, digits, underscore)"),
    -        ])
    -
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_skip_profile_setup(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -
    -        # Start by removing the dbt_project.yml so that we're not in an existing project
    -        os.remove('dbt_project.yml')
    -
    -        project_name = self.get_project_name()
    -        manager.prompt.side_effect = [
    -            project_name,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
-        # The project name is supplied via the prompt; -s skips profile setup
    -        self.run_dbt(['init', '-s'])
    -        manager.assert_has_calls([
    -            call.prompt("Enter a name for your project (letters, digits, underscore)")
    -        ])
    -
    -        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
    -            assert f.read() == f"""
    -# Name your project! Project names should contain only lowercase characters
    -# and underscores. A good package name should reflect your organization's
    -# name or the intended use of these models
    -name: '{project_name}'
    -version: '1.0.0'
    -config-version: 2
    -
    -# This setting configures which "profile" dbt uses for this project.
    -profile: '{project_name}'
    -
    -# These configurations specify where dbt should look for different types of files.
    -# The `model-paths` config, for example, states that models in this project can be
    -# found in the "models/" directory. You probably won't need to change these!
    -model-paths: ["models"]
    -analysis-paths: ["analyses"]
    -test-paths: ["tests"]
    -seed-paths: ["seeds"]
    -macro-paths: ["macros"]
    -snapshot-paths: ["snapshots"]
    -
    -target-path: "target"  # directory which will store compiled SQL files
    -clean-targets:         # directories to be removed by `dbt clean`
    -  - "target"
    -  - "dbt_packages"
    -
    -
    -# Configuring models
    -# Full documentation: https://docs.getdbt.com/docs/configuring-models
    -
    -# In this example config, we tell dbt to build all models in the example/
    -# directory as views. These settings can be overridden in the individual model
    -# files using the `{{{{ config(...) }}}}` macro.
    -models:
    -  {project_name}:
    -    # Config indicated by + and applies to all files under models/example/
    -    example:
    -      +materialized: view
    -"""
    -
    -    @use_profile('postgres')
    -    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    -    @mock.patch('click.confirm')
    -    @mock.patch('click.prompt')
    -    def test_postgres_init_provided_project_name_and_skip_profile_setup(self, mock_prompt, mock_confirm, mock_get_adapter):
    -        manager = Mock()
    -        manager.attach_mock(mock_prompt, 'prompt')
    -        manager.attach_mock(mock_confirm, 'confirm')
    -
    -        # Start by removing the dbt_project.yml so that we're not in an existing project
    -        os.remove('dbt_project.yml')
    -
    -        manager.prompt.side_effect = [
    -            1,
    -            'localhost',
    -            5432,
    -            'test_username',
    -            'test_password',
    -            'test_db',
    -            'test_schema',
    -            4,
    -        ]
    -        mock_get_adapter.return_value = [1]
    -
-        # Provide the project name through the init command
    -        project_name = self.get_project_name()
    -        self.run_dbt(['init', project_name, '-s'])
    -        manager.assert_not_called()
    -
    -        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
    -            assert f.read() == f"""
    -# Name your project! Project names should contain only lowercase characters
    -# and underscores. A good package name should reflect your organization's
    -# name or the intended use of these models
    -name: '{project_name}'
    -version: '1.0.0'
    -config-version: 2
    -
    -# This setting configures which "profile" dbt uses for this project.
    -profile: '{project_name}'
    -
    -# These configurations specify where dbt should look for different types of files.
    -# The `model-paths` config, for example, states that models in this project can be
    -# found in the "models/" directory. You probably won't need to change these!
    -model-paths: ["models"]
    -analysis-paths: ["analyses"]
    -test-paths: ["tests"]
    -seed-paths: ["seeds"]
    -macro-paths: ["macros"]
    -snapshot-paths: ["snapshots"]
    -
    -target-path: "target"  # directory which will store compiled SQL files
    -clean-targets:         # directories to be removed by `dbt clean`
    -  - "target"
    -  - "dbt_packages"
    -
    -
    -# Configuring models
    -# Full documentation: https://docs.getdbt.com/docs/configuring-models
    -
    -# In this example config, we tell dbt to build all models in the example/
    -# directory as views. These settings can be overridden in the individual model
    -# files using the `{{{{ config(...) }}}}` macro.
    -models:
    -  {project_name}:
    -    # Config indicated by + and applies to all files under models/example/
    -    example:
    -      +materialized: view
    -"""
    diff --git a/test/integration/043_custom_aliases_tests/macros-configs/macros.sql b/test/integration/043_custom_aliases_tests/macros-configs/macros.sql
    deleted file mode 100644
    index a50044ea09f..00000000000
    --- a/test/integration/043_custom_aliases_tests/macros-configs/macros.sql
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -
    -{#-- Verify that the config['alias'] key is present #}
    -{% macro generate_alias_name(custom_alias_name, node) -%}
    -    {%- if custom_alias_name is none -%}
    -        {{ node.name }}
    -    {%- else -%}
-        custom_{{ (node.config['alias'] if 'alias' in node.config else '') | trim }}
    -    {%- endif -%}
    -{%- endmacro %}
    -
    -{% macro string_literal(s) -%}
    -  {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }}
    -{%- endmacro %}
    -
    -{% macro default__string_literal(s) %}
    -    '{{ s }}'::text
    -{% endmacro %}
    diff --git a/test/integration/043_custom_aliases_tests/macros/macros.sql b/test/integration/043_custom_aliases_tests/macros/macros.sql
    deleted file mode 100644
    index a29f223b075..00000000000
    --- a/test/integration/043_custom_aliases_tests/macros/macros.sql
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -
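-{#-- Override the built-in generate_alias_name: node name by default, else "custom_" + the trimmed alias #}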
    -{% macro generate_alias_name(custom_alias_name, node) -%}
    -    {%- if custom_alias_name is none -%}
    -        {{ node.name }}
    -    {%- else -%}
    -        custom_{{ custom_alias_name | trim }}
    -    {%- endif -%}
    -{%- endmacro %}
    -
    -
    -{% macro string_literal(s) -%}
    -  {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }}
    -{%- endmacro %}
    -
    -{% macro default__string_literal(s) %}
    -    '{{ s }}'::text
    -{% endmacro %}
    diff --git a/test/integration/043_custom_aliases_tests/models/model1.sql b/test/integration/043_custom_aliases_tests/models/model1.sql
    deleted file mode 100644
    index 000ce2ed6c5..00000000000
    --- a/test/integration/043_custom_aliases_tests/models/model1.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table', alias='alias') }}
    -
    -select {{ string_literal(this.name) }} as model_name
    diff --git a/test/integration/043_custom_aliases_tests/models/model2.sql b/test/integration/043_custom_aliases_tests/models/model2.sql
    deleted file mode 100644
    index a2de8f099ea..00000000000
    --- a/test/integration/043_custom_aliases_tests/models/model2.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select {{ string_literal(this.name) }} as model_name
    diff --git a/test/integration/043_custom_aliases_tests/models/schema.yml b/test/integration/043_custom_aliases_tests/models/schema.yml
    deleted file mode 100644
    index 4d43836e482..00000000000
    --- a/test/integration/043_custom_aliases_tests/models/schema.yml
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model1
    -    columns:
    -      - name: model_name
    -        tests:
    -          - accepted_values:
    -             values: ['custom_alias']
    -  - name: model2
    -    columns:
    -      - name: model_name
    -        tests:
    -          - accepted_values:
    -             values: ['model2']
    diff --git a/test/integration/043_custom_aliases_tests/test_custom_aliases.py b/test/integration/043_custom_aliases_tests/test_custom_aliases.py
    deleted file mode 100644
    index 1acc9dd5224..00000000000
    --- a/test/integration/043_custom_aliases_tests/test_custom_aliases.py
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestAliases(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "custom_aliases_043"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            "macro-paths": ['macros'],
    -        }
    -
    -    @use_profile('postgres')
-    def test_postgres_custom_alias_name(self):
    -        results = self.run_dbt(['run'])
    -        self.assertEqual(len(results), 2)
    -        self.run_dbt(['test'])
    -
    -
    -class TestAliasesWithConfig(TestAliases):
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            "macro-paths": ['macros-configs'],
    -        }
    -
    -    @use_profile('postgres')
-    def test_postgres_custom_alias_name(self):
    -        results = self.run_dbt(['run'])
    -        self.assertEqual(len(results), 2)
    -        self.run_dbt(['test'])
    diff --git a/test/integration/045_test_severity_tests/models/model.sql b/test/integration/045_test_severity_tests/models/model.sql
    deleted file mode 100644
    index 3e29210ab0a..00000000000
    --- a/test/integration/045_test_severity_tests/models/model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from {{ source('source', 'nulls') }}
    diff --git a/test/integration/045_test_severity_tests/models/schema.yml b/test/integration/045_test_severity_tests/models/schema.yml
    deleted file mode 100644
    index 207c16c16c7..00000000000
    --- a/test/integration/045_test_severity_tests/models/schema.yml
    +++ /dev/null
    @@ -1,19 +0,0 @@
    -version: 2
    -models:
    -  - name: model
    -    columns:
    -      - name: email
    -        tests:
    -          - not_null:
    -              severity: "{{ 'error' if var('strict', false) else 'warn' }}"
    -sources:
    -  - name: source
    -    schema: "{{ var('test_run_schema') }}"
    -    tables:
    -      - name: nulls
    -        identifier: null_seed
    -        columns:
    -          - name: email
    -            tests:
    -              - not_null:
    -                  severity: "{{ 'error' if var('strict', false) else 'warn' }}"
    diff --git a/test/integration/045_test_severity_tests/seeds/null_seed.csv b/test/integration/045_test_severity_tests/seeds/null_seed.csv
    deleted file mode 100644
    index b26a87430ac..00000000000
    --- a/test/integration/045_test_severity_tests/seeds/null_seed.csv
    +++ /dev/null
    @@ -1,21 +0,0 @@
    -id,first_name,last_name,email,gender,ip_address,updated_at
    -1,Judith,Kennedy,jkennedy0@phpbb.com,Female,54.60.24.128,2015-12-24 12:19:28
    -2,Arthur,Kelly,akelly1@eepurl.com,Male,62.56.24.215,2015-10-28 16:22:15
    -3,Rachel,Moreno,rmoreno2@msu.edu,Female,31.222.249.23,2016-04-05 02:05:30
    -4,Ralph,Turner,rturner3@hp.com,Male,157.83.76.114,2016-08-08 00:06:51
    -5,Laura,Gonzales,lgonzales4@howstuffworks.com,Female,30.54.105.168,2016-09-01 08:25:38
    -6,Katherine,Lopez,null,Female,169.138.46.89,2016-08-30 18:52:11
    -7,Jeremy,Hamilton,jhamilton6@mozilla.org,Male,231.189.13.133,2016-07-17 02:09:46
    -8,Heather,Rose,hrose7@goodreads.com,Female,87.165.201.65,2015-12-29 22:03:56
    -9,Gregory,Kelly,gkelly8@trellian.com,Male,154.209.99.7,2016-03-24 21:18:16
    -10,Rachel,Lopez,rlopez9@themeforest.net,Female,237.165.82.71,2016-08-20 15:44:49
    -11,Donna,Welch,dwelcha@shutterfly.com,Female,103.33.110.138,2016-02-27 01:41:48
    -12,Russell,Lawrence,rlawrenceb@qq.com,Male,189.115.73.4,2016-06-11 03:07:09
    -13,Michelle,Montgomery,mmontgomeryc@scientificamerican.com,Female,243.220.95.82,2016-06-18 16:27:19
    -14,Walter,Castillo,null,Male,71.159.238.196,2016-10-06 01:55:44
    -15,Robin,Mills,rmillse@vkontakte.ru,Female,172.190.5.50,2016-10-31 11:41:21
    -16,Raymond,Holmes,rholmesf@usgs.gov,Male,148.153.166.95,2016-10-03 08:16:38
    -17,Gary,Bishop,gbishopg@plala.or.jp,Male,161.108.182.13,2016-08-29 19:35:20
    -18,Anna,Riley,arileyh@nasa.gov,Female,253.31.108.22,2015-12-11 04:34:27
    -19,Sarah,Knight,sknighti@foxnews.com,Female,222.220.3.177,2016-09-26 00:49:06
    -20,Phyllis,Fox,pfoxj@creativecommons.org,Female,163.191.232.95,2016-08-21 10:35:19
    diff --git a/test/integration/045_test_severity_tests/test_severity.py b/test/integration/045_test_severity_tests/test_severity.py
    deleted file mode 100644
    index 965862a2e7a..00000000000
    --- a/test/integration/045_test_severity_tests/test_severity.py
    +++ /dev/null
    @@ -1,93 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestSeverity(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "severity_045"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seed-paths': ['seeds'],
    -            'test-paths': ['tests'],
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -        }
    -
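-    # Helper: pass the unique test schema and the strict flag through --vars so
-    # the severity expressions in schema.yml render differently per invocation.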
    -    def run_dbt_with_vars(self, cmd, strict_var, *args, **kwargs):
    -        cmd.extend(['--vars',
    -                    '{{test_run_schema: {}, strict: {}}}'.format(self.unique_schema(), strict_var)])
    -        return self.run_dbt(cmd, *args, **kwargs)
    -
    -    @use_profile('postgres')
    -    def test_postgres_severity_warnings(self):
    -        self.run_dbt_with_vars(['seed'], 'false')
    -        self.run_dbt_with_vars(['run'], 'false')
    -        results = self.run_dbt_with_vars(
    -            ['test', '--select', 'test_type:generic'], 'false')
    -        self.assertEqual(len(results), 2)
    -        self.assertEqual(results[0].status, 'warn')
    -        self.assertEqual(results[0].failures, 2)
    -        self.assertEqual(results[1].status, 'warn')
    -        self.assertEqual(results[1].failures, 2)
    -
    -    @use_profile('postgres')
    -    def test_postgres_severity_rendered_errors(self):
    -        self.run_dbt_with_vars(['seed'], 'false')
    -        self.run_dbt_with_vars(['run'], 'false')
    -        results = self.run_dbt_with_vars(
    -            ['test', '--select', 'test_type:generic'], 'true', expect_pass=False)
    -        self.assertEqual(len(results), 2)
    -        self.assertEqual(results[0].status, 'fail')
    -        self.assertEqual(results[0].failures, 2)
    -        self.assertEqual(results[1].status, 'fail')
    -        self.assertEqual(results[1].failures, 2)
    -
    -    @use_profile('postgres')
    -    def test_postgres_severity_warnings_strict(self):
    -        self.run_dbt_with_vars(['seed'], 'false')
    -        self.run_dbt_with_vars(['run'], 'false')
    -        results = self.run_dbt_with_vars(
    -            ['test', '--select', 'test_type:generic'], 'false', expect_pass=True)
    -        self.assertEqual(len(results), 2)
    -        self.assertEqual(results[0].status, 'warn')
    -        self.assertEqual(results[0].failures, 2)
    -        self.assertEqual(results[1].status, 'warn')
    -        self.assertEqual(results[1].failures, 2)
    -
    -    @use_profile('postgres')
    -    def test_postgres_data_severity_warnings(self):
    -        self.run_dbt_with_vars(['seed'], 'false')
    -        self.run_dbt_with_vars(['run'], 'false')
    -        results = self.run_dbt_with_vars(
    -            ['test', '--select', 'test_type:singular'], 'false')
    -        self.assertEqual(len(results), 1)
    -        self.assertEqual(results[0].status, 'warn')
    -        self.assertEqual(results[0].failures, 2)
    -
    -    @use_profile('postgres')
    -    def test_postgres_data_severity_rendered_errors(self):
    -        self.run_dbt_with_vars(['seed'], 'false')
    -        self.run_dbt_with_vars(['run'], 'false')
    -        results = self.run_dbt_with_vars(
    -            ['test', '--select', 'test_type:singular'], 'true', expect_pass=False)
    -        self.assertEqual(len(results), 1)
    -        self.assertEqual(results[0].status, 'fail')
    -        self.assertEqual(results[0].failures, 2)
    -
    -    @use_profile('postgres')
    -    def test_postgres_data_severity_warnings_strict(self):
    -        self.run_dbt_with_vars(['seed'], 'false')
    -        self.run_dbt_with_vars(['run'], 'false')
    -        results = self.run_dbt_with_vars(
    -            ['test', '--select', 'test_type:singular'], 'false', expect_pass=True)
    -        self.assertEqual(len(results), 1)
-        self.assertEqual(results[0].status, 'warn')
    -        self.assertEqual(results[0].failures, 2)
    diff --git a/test/integration/045_test_severity_tests/tests/data.sql b/test/integration/045_test_severity_tests/tests/data.sql
    deleted file mode 100644
    index 65c5863ff03..00000000000
    --- a/test/integration/045_test_severity_tests/tests/data.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(severity='error' if var('strict', false) else 'warn') }}
    -select * from {{ ref('model') }} where email is null
    diff --git a/test/integration/052_column_quoting_tests/models-unquoted/model.sql b/test/integration/052_column_quoting_tests/models-unquoted/model.sql
    deleted file mode 100644
    index 1bdcda38353..00000000000
    --- a/test/integration/052_column_quoting_tests/models-unquoted/model.sql
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -{% set col_a = '"col_a"' %}
    -{% set col_b = '"col_b"' %}
    -
    -{{config(
    -    materialized = 'incremental',
    -    unique_key = col_a,
    -    incremental_strategy = var('strategy')
    -    )}}
    -
    -select
    -{{ col_a }}, {{ col_b }}
    -from {{ref('seed')}}
    diff --git a/test/integration/052_column_quoting_tests/models/model.sql b/test/integration/052_column_quoting_tests/models/model.sql
    deleted file mode 100644
    index 3bc61e082d9..00000000000
    --- a/test/integration/052_column_quoting_tests/models/model.sql
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -{% set col_a = '"col_A"' %}
    -{% set col_b = '"col_B"' %}
    -
    -{{config(
    -    materialized = 'incremental',
    -    unique_key = col_a,
    -    incremental_strategy = var('strategy')
    -    )}}
    -
    -select
    -{{ col_a }}, {{ col_b }}
    -from {{ref('seed')}}
    diff --git a/test/integration/052_column_quoting_tests/seeds/seed.csv b/test/integration/052_column_quoting_tests/seeds/seed.csv
    deleted file mode 100644
    index d4a1e26eed2..00000000000
    --- a/test/integration/052_column_quoting_tests/seeds/seed.csv
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -col_A,col_B
    -1,2
    -3,4
    -5,6
    diff --git a/test/integration/052_column_quoting_tests/test_column_quotes.py b/test/integration/052_column_quoting_tests/test_column_quotes.py
    deleted file mode 100644
    index f5aef6fed39..00000000000
    --- a/test/integration/052_column_quoting_tests/test_column_quotes.py
    +++ /dev/null
    @@ -1,78 +0,0 @@
-from test.integration.base import DBTIntegrationTest, use_profile
    -import os
    -
    -
    -class BaseColumnQuotingTest(DBTIntegrationTest):
    -    def column_quoting(self):
    -        raise NotImplementedError('column_quoting not implemented')
    -
    -    @property
    -    def schema(self):
    -        return 'dbt_column_quoting_052'
    -
    -    @staticmethod
    -    def dir(value):
    -        return os.path.normpath(value)
    -
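-    # Seed once, then run twice so the second run exercises the configured
-    # incremental strategy against the quoted columns.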
-    def _run_column_quotes(self, strategy='delete+insert'):
    -        strategy_vars = '{{"strategy": "{}"}}'.format(strategy)
    -        self.run_dbt(['seed', '--vars', strategy_vars])
    -        self.run_dbt(['run', '--vars', strategy_vars])
    -        self.run_dbt(['run', '--vars', strategy_vars])
    -
    -
    -class TestColumnQuotingDefault(BaseColumnQuotingTest):
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2
    -        }
    -
    -    @property
    -    def models(self):
    -        return self.dir('models')
    -
    -    def run_dbt(self, *args, **kwargs):
    -        return super().run_dbt(*args, **kwargs)
    -
    -    @use_profile('postgres')
    -    def test_postgres_column_quotes(self):
-        self._run_column_quotes()
    -
    -
    -class TestColumnQuotingDisabled(BaseColumnQuotingTest):
    -    @property
    -    def models(self):
    -        return self.dir('models-unquoted')
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -        }
    -
    -    @use_profile('postgres')
    -    def test_postgres_column_quotes(self):
-        self._run_column_quotes()
    -
    -
    -class TestColumnQuotingEnabled(BaseColumnQuotingTest):
    -    @property
    -    def models(self):
    -        return self.dir('models')
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seeds': {
    -                'quote_columns': True,
    -            },
    -        }
    -
    -    @use_profile('postgres')
    -    def test_postgres_column_quotes(self):
-        self._run_column_quotes()
    diff --git a/test/integration/055_ref_override_tests/macros/ref_override_macro.sql b/test/integration/055_ref_override_tests/macros/ref_override_macro.sql
    deleted file mode 100644
    index a4a85b50324..00000000000
    --- a/test/integration/055_ref_override_tests/macros/ref_override_macro.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    --- Macro to override ref and always return the same result
    -{% macro ref(modelname) %}
    -{% do return(builtins.ref(modelname).replace_path(identifier='seed_2')) %}
    -{% endmacro %}
    \ No newline at end of file
    diff --git a/test/integration/055_ref_override_tests/models/ref_override.sql b/test/integration/055_ref_override_tests/models/ref_override.sql
    deleted file mode 100644
    index 3bbf936ae2e..00000000000
    --- a/test/integration/055_ref_override_tests/models/ref_override.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -select
    -    *
    -from {{ ref('seed_1') }}
    \ No newline at end of file
    diff --git a/test/integration/055_ref_override_tests/seeds/seed_1.csv b/test/integration/055_ref_override_tests/seeds/seed_1.csv
    deleted file mode 100644
    index 4de2771bdac..00000000000
    --- a/test/integration/055_ref_override_tests/seeds/seed_1.csv
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -a,b
    -1,2
    -2,4
    -3,6
    \ No newline at end of file
    diff --git a/test/integration/055_ref_override_tests/seeds/seed_2.csv b/test/integration/055_ref_override_tests/seeds/seed_2.csv
    deleted file mode 100644
    index eeadef9495c..00000000000
    --- a/test/integration/055_ref_override_tests/seeds/seed_2.csv
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -a,b
    -6,2
    -12,4
    -18,6
    \ No newline at end of file
    diff --git a/test/integration/055_ref_override_tests/test_ref_override.py b/test/integration/055_ref_override_tests/test_ref_override.py
    deleted file mode 100644
    index 748379b447c..00000000000
    --- a/test/integration/055_ref_override_tests/test_ref_override.py
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestRefOverride(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "dbt_ref_override_055"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seed-paths': ['seeds'],
    -            "macro-paths": ["macros"],
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -        }
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @use_profile('postgres')
    -    def test_postgres_ref_override(self):
    -        self.run_dbt(['seed'])
    -        self.run_dbt(['run'])
    -        # We want it to equal seed_2 and not seed_1. If it's
    -        # still pointing at seed_1 then the override hasn't worked.
    -        self.assertTablesEqual('ref_override', 'seed_2')
    diff --git a/test/integration/056_column_type_tests/macros/test_alter_column_type.sql b/test/integration/056_column_type_tests/macros/test_alter_column_type.sql
    deleted file mode 100644
    index 133d59fada5..00000000000
    --- a/test/integration/056_column_type_tests/macros/test_alter_column_type.sql
    +++ /dev/null
    @@ -1,5 +0,0 @@
    --- Macro to alter a column type
    -{% macro test_alter_column_type(model_name, column_name, new_column_type) %}
    -  {% set relation = ref(model_name) %}
    -  {{ alter_column_type(relation, column_name, new_column_type) }}
    -{% endmacro %}
    diff --git a/test/integration/056_column_type_tests/pg_models/model.sql b/test/integration/056_column_type_tests/pg_models/model.sql
    deleted file mode 100644
    index f1b877225a9..00000000000
    --- a/test/integration/056_column_type_tests/pg_models/model.sql
    +++ /dev/null
    @@ -1,9 +0,0 @@
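--- One column per Postgres type under test; the is_type test in schema.yml
--- checks the expected type mapping for each.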
    -select
    -    1::smallint as smallint_col,
    -    2::integer as int_col,
    -    3::bigint as bigint_col,
    -    4.0::real as real_col,
    -    5.0::double precision as double_col,
    -    6.0::numeric as numeric_col,
    -    '7'::text as text_col,
    -    '8'::varchar(20) as varchar_col
    diff --git a/test/integration/056_column_type_tests/pg_models/schema.yml b/test/integration/056_column_type_tests/pg_models/schema.yml
    deleted file mode 100644
    index 93e309d1b0b..00000000000
    --- a/test/integration/056_column_type_tests/pg_models/schema.yml
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -version: 2
    -models:
    -  - name: model
    -    tests:
    -      - is_type:
    -          column_map:
    -            smallint_col: ['integer', 'number']
    -            int_col: ['integer', 'number']
    -            bigint_col: ['integer', 'number']
    -            real_col: ['float', 'number']
    -            double_col: ['float', 'number']
    -            numeric_col: ['numeric', 'number']
    -            text_col: ['string', 'not number']
    -            varchar_col: ['string', 'not number']
    diff --git a/test/integration/056_column_type_tests/test_alter_column_types.py b/test/integration/056_column_type_tests/test_alter_column_types.py
    deleted file mode 100644
    index e06e1f5697c..00000000000
    --- a/test/integration/056_column_type_tests/test_alter_column_types.py
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -import yaml
    -
    -
    -class TestAlterColumnTypes(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return '056_alter_column_types'
    -
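-    # Build the model, alter a column's type via run-operation, then re-run the
-    # schema test to confirm the expected type mapping still holds.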
    -    def run_and_alter_and_test(self, alter_column_type_args):
    -        self.assertEqual(len(self.run_dbt(['run'])), 1)
    -        self.run_dbt(['run-operation', 'test_alter_column_type', '--args', alter_column_type_args])
    -        self.assertEqual(len(self.run_dbt(['test'])), 1)
    diff --git a/test/integration/056_column_type_tests/test_column_types.py b/test/integration/056_column_type_tests/test_column_types.py
    deleted file mode 100644
    index 66abbb4c970..00000000000
    --- a/test/integration/056_column_type_tests/test_column_types.py
    +++ /dev/null
    @@ -1,22 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestColumnTypes(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return '056_column_types'
    -
    -    def run_and_test(self):
    -        self.assertEqual(len(self.run_dbt(['run'])), 1)
    -        self.assertEqual(len(self.run_dbt(['test'])), 1)
    -
    -
    -class TestPostgresColumnTypes(TestColumnTypes):
    -    @property
    -    def models(self):
    -        return 'pg_models'
    -
    -    @use_profile('postgres')
    -    def test_postgres_column_types(self):
    -        self.run_and_test()
    -
    diff --git a/test/integration/057_run_query_tests/test_pg_types.py b/test/integration/057_run_query_tests/test_pg_types.py
    deleted file mode 100644
    index d6553bb9e8e..00000000000
    --- a/test/integration/057_run_query_tests/test_pg_types.py
    +++ /dev/null
    @@ -1,25 +0,0 @@
    -
    -from test.integration.base import DBTIntegrationTest, use_profile
    -import json
    -
    -class TestPostgresTypes(DBTIntegrationTest):
    -
    -    @property
    -    def schema(self):
    -        return "pg_query_types_057"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'macro-paths': ['macros'],
    -        }
    -
    -    @use_profile('postgres')
    -    def test__postgres_nested_types(self):
    -        result = self.run_dbt(['run-operation', 'test_array_results'])
    -        self.assertTrue(result.success)
    diff --git a/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql b/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql
    deleted file mode 100644
    index 642b0f14a19..00000000000
    --- a/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='table') }}
    -select 1 as id, 'Ed' as name
    diff --git a/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml b/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml
    deleted file mode 100644
    index aa7b4f88820..00000000000
    --- a/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -version: 2
    -models:
    -  - name: missing_column
    -    columns:
    -      - name: id
    -        description: "test id column description"
    -      - name: column_that_does_not_exist
    -        description: "comment that cannot be created"
    diff --git a/test/integration/060_persist_docs_tests/models/my_fun_docs.md b/test/integration/060_persist_docs_tests/models/my_fun_docs.md
    deleted file mode 100644
    index f3c0fbf55ec..00000000000
    --- a/test/integration/060_persist_docs_tests/models/my_fun_docs.md
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -{% docs my_fun_doc %}
    -name Column description "with double quotes"
-and with 'single  quotes' as well as other;
    -'''abc123'''
    -reserved -- characters
    ---
    -/* comment */
    -Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting
    -
    -{% enddocs %}
    diff --git a/test/integration/060_persist_docs_tests/models/no_docs_model.sql b/test/integration/060_persist_docs_tests/models/no_docs_model.sql
    deleted file mode 100644
    index e39a7a1566f..00000000000
    --- a/test/integration/060_persist_docs_tests/models/no_docs_model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id, 'Alice' as name
    diff --git a/test/integration/060_persist_docs_tests/models/table_model.sql b/test/integration/060_persist_docs_tests/models/table_model.sql
    deleted file mode 100644
    index c0e93c3f307..00000000000
    --- a/test/integration/060_persist_docs_tests/models/table_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='table') }}
    -select 1 as id, 'Joe' as name
    diff --git a/test/integration/060_persist_docs_tests/models/view_model.sql b/test/integration/060_persist_docs_tests/models/view_model.sql
    deleted file mode 100644
    index a6f96a16d5d..00000000000
    --- a/test/integration/060_persist_docs_tests/models/view_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='view') }}
    -select 2 as id, 'Bob' as name
    diff --git a/test/integration/060_persist_docs_tests/seeds/seed.csv b/test/integration/060_persist_docs_tests/seeds/seed.csv
    deleted file mode 100644
    index 1a728c8ab74..00000000000
    --- a/test/integration/060_persist_docs_tests/seeds/seed.csv
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -id,name
    -1,Alice
    -2,Bob
    diff --git a/test/integration/060_persist_docs_tests/test_persist_docs.py b/test/integration/060_persist_docs_tests/test_persist_docs.py
    deleted file mode 100644
    index 89fecf6383e..00000000000
    --- a/test/integration/060_persist_docs_tests/test_persist_docs.py
    +++ /dev/null
    @@ -1,126 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -import os
    -
    -import json
    -
    -
    -class BasePersistDocsTest(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "persist_docs_060"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
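-    # The awkward characters from my_fun_docs.md must survive into the persisted
-    # comment: quotes, dollar-quoting, a block comment, and newlines.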
    -    def _assert_common_comments(self, *comments):
    -        for comment in comments:
    -            assert '"with double quotes"' in comment
    -            assert """'''abc123'''""" in comment
    -            assert '\n' in comment
    -            assert 'Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting' in comment
    -            assert '/* comment */' in comment
    -            if os.name == 'nt':
    -                assert '--\r\n' in comment or '--\n' in comment
    -            else:
    -                assert '--\n' in comment
    -
    -    def _assert_has_table_comments(self, table_node):
    -        table_comment = table_node['metadata']['comment']
    -        assert table_comment.startswith('Table model description')
    -
    -        table_id_comment = table_node['columns']['id']['comment']
    -        assert table_id_comment.startswith('id Column description')
    -
    -        table_name_comment = table_node['columns']['name']['comment']
    -        assert table_name_comment.startswith(
    -            'Some stuff here and then a call to')
    -
    -        self._assert_common_comments(
    -            table_comment, table_id_comment, table_name_comment
    -        )
    -
    -    def _assert_has_view_comments(self, view_node, has_node_comments=True,
    -                                  has_column_comments=True):
    -        view_comment = view_node['metadata']['comment']
    -        if has_node_comments:
    -            assert view_comment.startswith('View model description')
    -            self._assert_common_comments(view_comment)
    -        else:
    -            assert view_comment is None
    -
    -        view_id_comment = view_node['columns']['id']['comment']
    -        if has_column_comments:
    -            assert view_id_comment.startswith('id Column description')
    -            self._assert_common_comments(view_id_comment)
    -        else:
    -            assert view_id_comment is None
    -
    -        view_name_comment = view_node['columns']['name']['comment']
    -        assert view_name_comment is None
    -
    -
    -class TestPersistDocs(BasePersistDocsTest):
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'models': {
    -                'test': {
    -                    '+persist_docs': {
    -                        "relation": True,
    -                        "columns": True,
    -                    },
    -                }
    -            }
    -        }
    -
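-    # Build, generate the catalog, and assert the persisted comments landed on
-    # all three models (table, view, and the model without docs).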
    -    def run_has_comments_pglike(self):
    -        self.run_dbt()
    -        self.run_dbt(['docs', 'generate'])
    -        with open('target/catalog.json') as fp:
    -            catalog_data = json.load(fp)
    -        assert 'nodes' in catalog_data
    -        assert len(catalog_data['nodes']) == 3
    -        table_node = catalog_data['nodes']['model.test.table_model']
-        self._assert_has_table_comments(table_node)
    -
    -        view_node = catalog_data['nodes']['model.test.view_model']
    -        self._assert_has_view_comments(view_node)
    -
    -        no_docs_node = catalog_data['nodes']['model.test.no_docs_model']
    -        self._assert_has_view_comments(no_docs_node, False, False)
    -
    -    @use_profile('postgres')
    -    def test_postgres_comments(self):
    -        self.run_has_comments_pglike()
    -
-
-class TestPersistDocsColumnMissing(BasePersistDocsTest):
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'models': {
    -                'test': {
    -                    '+persist_docs': {
    -                        "columns": True,
    -                    },
    -                }
    -            }
    -        }
    -
    -    @property
    -    def models(self):
    -        return 'models-column-missing'
    -
    -    @use_profile('postgres')
    -    def test_postgres_missing_column(self):
    -        self.run_dbt()
    -        self.run_dbt(['docs', 'generate'])
    -        with open('target/catalog.json') as fp:
    -            catalog_data = json.load(fp)
    -        assert 'nodes' in catalog_data
    -
    -        table_node = catalog_data['nodes']['model.test.missing_column']
    -        table_id_comment = table_node['columns']['id']['comment']
    -        assert table_id_comment.startswith('test id column description')
    diff --git a/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql b/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql
    deleted file mode 100644
    index 2f976e3a9b5..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='ephemeral') }}
    -select * from {{ ref('view_model') }}
    diff --git a/test/integration/062_defer_state_tests/changed_models/schema.yml b/test/integration/062_defer_state_tests/changed_models/schema.yml
    deleted file mode 100644
    index 1ec506d3d19..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models/schema.yml
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -version: 2
    -models:
    -  - name: view_model
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -      - name: name
    diff --git a/test/integration/062_defer_state_tests/changed_models/table_model.sql b/test/integration/062_defer_state_tests/changed_models/table_model.sql
    deleted file mode 100644
    index 65909318bab..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models/table_model.sql
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{{ config(materialized='table') }}
    -select * from {{ ref('ephemeral_model') }}
    -
    --- establish a macro dependency to trigger state:modified.macros
    --- depends on: {{ my_macro() }}
    \ No newline at end of file
    diff --git a/test/integration/062_defer_state_tests/changed_models/view_model.sql b/test/integration/062_defer_state_tests/changed_models/view_model.sql
    deleted file mode 100644
    index bddbbb23cc2..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models/view_model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from no.such.table
    diff --git a/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql
    deleted file mode 100644
    index 5155dfa475e..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='ephemeral') }}
    -select * from no.such.table
    diff --git a/test/integration/062_defer_state_tests/changed_models_bad/schema.yml b/test/integration/062_defer_state_tests/changed_models_bad/schema.yml
    deleted file mode 100644
    index 1ec506d3d19..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_bad/schema.yml
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -version: 2
    -models:
    -  - name: view_model
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -      - name: name
    diff --git a/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql
    deleted file mode 100644
    index 65909318bab..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{{ config(materialized='table') }}
    -select * from {{ ref('ephemeral_model') }}
    -
    --- establish a macro dependency to trigger state:modified.macros
    --- depends on: {{ my_macro() }}
    \ No newline at end of file
    diff --git a/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql
    deleted file mode 100644
    index bddbbb23cc2..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from no.such.table
    diff --git a/test/integration/062_defer_state_tests/changed_models_missing/schema.yml b/test/integration/062_defer_state_tests/changed_models_missing/schema.yml
    deleted file mode 100644
    index 1ec506d3d19..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_missing/schema.yml
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -version: 2
    -models:
    -  - name: view_model
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -      - name: name
    diff --git a/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql b/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql
    deleted file mode 100644
    index 22b040d2c8b..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='table') }}
    -select 1 as fun
    diff --git a/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql b/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql
    deleted file mode 100644
    index 4b91aa0f2fa..00000000000
    --- a/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from {{ ref('seed') }}
    diff --git a/test/integration/062_defer_state_tests/macros/infinite_macros.sql b/test/integration/062_defer_state_tests/macros/infinite_macros.sql
    deleted file mode 100644
    index 81d2083d3bb..00000000000
    --- a/test/integration/062_defer_state_tests/macros/infinite_macros.sql
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -{# trigger infinite recursion if not handled #}
    -
    -{% macro my_infinitely_recursive_macro() %}
    -  {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }}
    -{% endmacro %}
    -
    -{% macro default__my_infinitely_recursive_macro() %}
    -    {% if unmet_condition %}
    -        {{ my_infinitely_recursive_macro() }}
    -    {% else %}
    -        {{ return('') }}
    -    {% endif %}
    -{% endmacro %}
    diff --git a/test/integration/062_defer_state_tests/macros/macros.sql b/test/integration/062_defer_state_tests/macros/macros.sql
    deleted file mode 100644
    index 79519c1b60b..00000000000
    --- a/test/integration/062_defer_state_tests/macros/macros.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{% macro my_macro() %}
    -    {% do log('in a macro' ) %}
    -{% endmacro %}
    diff --git a/test/integration/062_defer_state_tests/models/ephemeral_model.sql b/test/integration/062_defer_state_tests/models/ephemeral_model.sql
    deleted file mode 100644
    index 2f976e3a9b5..00000000000
    --- a/test/integration/062_defer_state_tests/models/ephemeral_model.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -{{ config(materialized='ephemeral') }}
    -select * from {{ ref('view_model') }}
    diff --git a/test/integration/062_defer_state_tests/models/exposures.yml b/test/integration/062_defer_state_tests/models/exposures.yml
    deleted file mode 100644
    index 489dec3c3c4..00000000000
    --- a/test/integration/062_defer_state_tests/models/exposures.yml
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -version: 2
    -exposures:
    -  - name: my_exposure
    -    type: application
    -    depends_on:
    -      - ref('view_model')
    -    owner:
    -      email: test@example.com
    diff --git a/test/integration/062_defer_state_tests/models/schema.yml b/test/integration/062_defer_state_tests/models/schema.yml
    deleted file mode 100644
    index 342335148bf..00000000000
    --- a/test/integration/062_defer_state_tests/models/schema.yml
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -version: 2
    -models:
    -  - name: view_model
    -    columns:
    -      - name: id
    -        tests:
    -          - unique:
    -              severity: error
    -          - not_null
    -      - name: name
    diff --git a/test/integration/062_defer_state_tests/models/table_model.sql b/test/integration/062_defer_state_tests/models/table_model.sql
    deleted file mode 100644
    index 65909318bab..00000000000
    --- a/test/integration/062_defer_state_tests/models/table_model.sql
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{{ config(materialized='table') }}
    -select * from {{ ref('ephemeral_model') }}
    -
    --- establish a macro dependency to trigger state:modified.macros
    --- depends on: {{ my_macro() }}
    \ No newline at end of file
    diff --git a/test/integration/062_defer_state_tests/models/view_model.sql b/test/integration/062_defer_state_tests/models/view_model.sql
    deleted file mode 100644
    index 72cb07a5ef4..00000000000
    --- a/test/integration/062_defer_state_tests/models/view_model.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -select * from {{ ref('seed') }}
    -
    --- establish a macro dependency that trips infinite recursion if not handled
    --- depends on: {{ my_infinitely_recursive_macro() }}
    \ No newline at end of file
    diff --git a/test/integration/062_defer_state_tests/previous_state/manifest.json b/test/integration/062_defer_state_tests/previous_state/manifest.json
    deleted file mode 100644
    index 6ab63f3f563..00000000000
    --- a/test/integration/062_defer_state_tests/previous_state/manifest.json
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -{
    -    "metadata": {
    -        "dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v3.json",
    -        "dbt_version": "0.21.1"
    -    }
    -}
    diff --git a/test/integration/062_defer_state_tests/seeds/seed.csv b/test/integration/062_defer_state_tests/seeds/seed.csv
    deleted file mode 100644
    index 1a728c8ab74..00000000000
    --- a/test/integration/062_defer_state_tests/seeds/seed.csv
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -id,name
    -1,Alice
    -2,Bob
    diff --git a/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql b/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql
    deleted file mode 100644
    index 6a7d2b31bfa..00000000000
    --- a/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -{% snapshot my_cool_snapshot %}
    -
    -    {{
    -        config(
    -            target_database=database,
    -            target_schema=schema,
    -            unique_key='id',
    -            strategy='check',
    -            check_cols=['id'],
    -        )
    -    }}
    -    select * from {{ ref('view_model') }}
    -
    -{% endsnapshot %}
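
The snapshot fixture above uses the 'check' strategy: a row counts as changed when any of the configured check_cols differs from the currently-valid snapshot row. A minimal Python sketch of that comparison, illustrative only since dbt implements it in SQL against the snapshot table:

    def row_changed(current_row: dict, snapshot_row: dict, check_cols: list) -> bool:
        # a row is 'changed' when any watched column differs from the live snapshot row
        return any(current_row[c] != snapshot_row[c] for c in check_cols)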
    diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py
    deleted file mode 100644
    index 058e43ef05f..00000000000
    --- a/test/integration/062_defer_state_tests/test_defer_state.py
    +++ /dev/null
    @@ -1,344 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -import copy
    -import json
    -import os
    -import shutil
    -
    -import pytest
    -import dbt.exceptions
    -
    -
    -class TestDeferState(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "defer_state_062"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    def setUp(self):
    -        self.other_schema = None
    -        super().setUp()
    -        self._created_schemas.add(self.other_schema)
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seeds': {
    -                'test': {
    -                    'quote_columns': False,
    -                }
    -            }
    -        }
    -
    -    def get_profile(self, adapter_type):
    -        if self.other_schema is None:
    -            self.other_schema = self.unique_schema() + '_other'
    -        profile = super().get_profile(adapter_type)
    -        default_name = profile['test']['target']
    -        profile['test']['outputs']['otherschema'] = copy.deepcopy(profile['test']['outputs'][default_name])
    -        profile['test']['outputs']['otherschema']['schema'] = self.other_schema
    -        return profile
    -
    -    def copy_state(self):
    -        assert not os.path.exists('state')
    -        os.makedirs('state')
    -        shutil.copyfile('target/manifest.json', 'state/manifest.json')
    -
    -    def run_and_compile_defer(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['test'])
    -        assert len(results) == 2
    -
    -        # copy files
    -        self.copy_state()
    -
    -        # defer test, it succeeds
    -        results, success = self.run_dbt_and_check(['compile', '--state', 'state', '--defer'])
    -        self.assertEqual(len(results.results), 6)
    -        self.assertEqual(results.results[0].node.name, "seed")
    -        self.assertTrue(success)        
    -
    -    def run_and_snapshot_defer(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['test'])
    -        assert len(results) == 2
    -
    -        # snapshot succeeds without --defer
    -        results = self.run_dbt(['snapshot'])
    -
    -        # no state, snapshot fails
    -        with pytest.raises(dbt.exceptions.RuntimeException):
    -            results = self.run_dbt(['snapshot', '--state', 'state', '--defer'])
    -
    -        # copy files
    -        self.copy_state()
    -
    -        # defer test, it succeeds
    -        results = self.run_dbt(['snapshot', '--state', 'state', '--defer'])
    -
    -        # favor_state test, it succeeds
    -        results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state'])
    -
    -    def run_and_defer(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['test'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -
    -        # test tests first, because run will change things
    -        # no state, wrong schema, failure.
    -        self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False)
    -
    -        # no state, run also fails
    -        self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False)
    -
    -        # defer test, it succeeds
    -        results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--target', 'otherschema'])
    -
    -        # with state it should work though
    -        results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'])
    -        assert self.other_schema not in results[0].node.compiled_code
    -        assert self.unique_schema() in results[0].node.compiled_code
    -
    -        with open('target/manifest.json') as fp:
    -            data = json.load(fp)
    -        assert data['nodes']['seed.test.seed']['deferred']
    -
    -        assert len(results) == 1
    -
    -    def run_and_defer_favor_state(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -        assert not any(r.node.deferred for r in results)
    -        results = self.run_dbt(['test'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -
    -        # test tests first, because run will change things
    -        # no state, wrong schema, failure.
    -        self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False)
    -
    -        # no state, run also fails
    -        self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False)
    -
    -        # defer test, it succeeds
    -        results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'])
    -
    -        # with state it should work though
    -        results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'])
    -        assert self.other_schema not in results[0].node.compiled_code
    -        assert self.unique_schema() in results[0].node.compiled_code
    -
    -        with open('target/manifest.json') as fp:
    -            data = json.load(fp)
    -        assert data['nodes']['seed.test.seed']['deferred']
    -
    -        assert len(results) == 1
    -
    -    def run_switchdirs_defer(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -
    -        self.use_default_project({'model-paths': ['changed_models']})
    -        # the sql here is just wrong, so it should fail
    -        self.run_dbt(
    -            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'],
    -            expect_pass=False,
    -        )
    -        # but this should work since we just use the old happy model
    -        self.run_dbt(
    -            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'],
    -            expect_pass=True,
    -        )
    -
    -        self.use_default_project({'model-paths': ['changed_models_bad']})
    -        # this should fail because the table model refs a broken ephemeral
    -        # model, which it should see
    -        self.run_dbt(
    -            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'],
    -            expect_pass=False,
    -        )
    -
    -    def run_switchdirs_defer_favor_state(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -
    -        self.use_default_project({'model-paths': ['changed_models']})
    -        # the sql here is just wrong, so it should fail
    -        self.run_dbt(
    -            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
    -            expect_pass=False,
    -        )
    -        # but this should work since we just use the old happy model
    -        self.run_dbt(
    -            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
    -            expect_pass=True,
    -        )
    -
    -        self.use_default_project({'model-paths': ['changed_models_bad']})
    -        # this should fail because the table model refs a broken ephemeral
    -        # model, which it should see
    -        self.run_dbt(
    -            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
    -            expect_pass=False,
    -        )
    -
    -    def run_defer_iff_not_exists(self):
    -        results = self.run_dbt(['seed', '--target', 'otherschema'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run', '--target', 'otherschema'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run', '--state', 'state', '--defer'])
    -        assert len(results) == 2
    -
    -        # because the seed now exists in our schema, we shouldn't defer it
    -        assert self.other_schema not in results[0].node.compiled_code
    -        assert self.unique_schema() in results[0].node.compiled_code
    -
    -    def run_defer_iff_not_exists_favor_state(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'])
    -        assert len(results) == 2
    -
-        # with favor-state, the seed defers to the schema recorded in state, not the current target
    -        assert self.other_schema not in results[0].node.compiled_code
    -        assert self.unique_schema() in results[0].node.compiled_code
    -
    -    def run_defer_deleted_upstream(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -
    -        self.use_default_project({'model-paths': ['changed_models_missing']})
    -        # ephemeral_model is now gone. previously this caused a
    -        # keyerror (dbt#2875), now it should pass
    -        self.run_dbt(
    -            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'],
    -            expect_pass=True,
    -        )
    -
    -        # despite deferral, test should use models just created in our schema
    -        results = self.run_dbt(['test', '--state', 'state', '--defer'])
    -        assert self.other_schema not in results[0].node.compiled_code
    -        assert self.unique_schema() in results[0].node.compiled_code
    -
    -    def run_defer_deleted_upstream_favor_state(self):
    -        results = self.run_dbt(['seed'])
    -        assert len(results) == 1
    -        results = self.run_dbt(['run'])
    -        assert len(results) == 2
    -
    -        # copy files over from the happy times when we had a good target
    -        self.copy_state()
    -
    -        self.use_default_project({'model-paths': ['changed_models_missing']})
    -
    -        self.run_dbt(
    -            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
    -            expect_pass=True,
    -        )
    -
    -        # despite deferral, test should use models just created in our schema
    -        results = self.run_dbt(['test', '--state', 'state', '--defer', '--favor-state'])
    -        assert self.other_schema not in results[0].node.compiled_code
    -        assert self.unique_schema() in results[0].node.compiled_code
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_changetarget(self):
    -        self.run_and_defer()
    -
    -        # make sure these commands don't work with --defer
    -        with pytest.raises(SystemExit):
    -            self.run_dbt(['seed', '--defer'])
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_changetarget_favor_state(self):
    -        self.run_and_defer_favor_state()
    -
    -        # make sure these commands don't work with --defer
    -        with pytest.raises(SystemExit):
    -            self.run_dbt(['seed', '--defer'])
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_changedir(self):
    -        self.run_switchdirs_defer()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_changedir_favor_state(self):
    -        self.run_switchdirs_defer_favor_state()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_defer_iffnotexists(self):
    -        self.run_defer_iff_not_exists()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_defer_iffnotexists_favor_state(self):
    -        self.run_defer_iff_not_exists_favor_state()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_defer_deleted_upstream(self):
    -        self.run_defer_deleted_upstream()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_defer_deleted_upstream_favor_state(self):
    -        self.run_defer_deleted_upstream_favor_state()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_snapshot_defer(self):
    -        self.run_and_snapshot_defer()
    -
    -    @use_profile('postgres')
    -    def test_postgres_state_compile_defer(self):
    -        self.run_and_compile_defer()
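
The deferral rule these tests exercise can be summarized as: a ref() resolves to the node just built in the current target, falling back to the relation recorded in the --state manifest when the node is missing; with --favor-state, the state manifest wins whenever it has the node. A hedged sketch, not dbt's actual resolver, where current and state are assumed maps from node id to relation:

    def resolve_relation(node_id, current, state, favor_state=False):
        # --favor-state: prefer the state manifest whenever it knows the node
        if favor_state and node_id in state:
            return state[node_id]
        # plain --defer: use the current target, deferring only if absent there
        return current.get(node_id, state.get(node_id))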
    diff --git a/test/integration/062_defer_state_tests/test_modified_state.py b/test/integration/062_defer_state_tests/test_modified_state.py
    deleted file mode 100644
    index 5f64cd66ae1..00000000000
    --- a/test/integration/062_defer_state_tests/test_modified_state.py
    +++ /dev/null
    @@ -1,211 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -import os
    -import random
    -import shutil
    -import string
    -
    -import pytest
    -
    -from dbt.exceptions import CompilationException, IncompatibleSchemaException
    -
    -
    -class TestModifiedState(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "modified_state_062"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'macro-paths': ['macros'],
    -            'seeds': {
    -                'test': {
    -                    'quote_columns': True,
    -                }
    -            }
    -        }
    -
    -    def _symlink_test_folders(self):
    -        # dbt's normal symlink behavior breaks this test. Copy the files
    -        # so we can freely modify them.
    -        for entry in os.listdir(self.test_original_source_path):
    -            src = os.path.join(self.test_original_source_path, entry)
    -            tst = os.path.join(self.test_root_dir, entry)
    -            if entry in {'models', 'seeds', 'macros', 'previous_state'}:
    -                shutil.copytree(src, tst)
    -            elif os.path.isdir(entry) or entry.endswith('.sql'):
    -                os.symlink(src, tst)
    -
    -    def copy_state(self):
    -        assert not os.path.exists('state')
    -        os.makedirs('state')
    -        shutil.copyfile('target/manifest.json', 'state/manifest.json')
    -
    -    def setUp(self):
    -        super().setUp()
    -        self.run_dbt(['seed'])
    -        self.run_dbt(['run'])
    -        self.copy_state()
    -
    -    @use_profile('postgres')
    -    def test_postgres_changed_seed_contents_state(self):
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
    -        assert len(results) == 0
    -        with open('seeds/seed.csv') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -        with open('seeds/seed.csv', 'a') as fp:
    -            fp.write(f'3,carl{newline}')
    -
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'state:modified+', '--state', './state'])
    -        assert len(results) == 7
    -        assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
    -
    -        shutil.rmtree('./state')
    -        self.copy_state()
    -
    -        with open('seeds/seed.csv', 'a') as fp:
    -            # assume each line is ~2 bytes + len(name)
    -            target_size = 1*1024*1024
    -            line_size = 64
    -
    -            num_lines = target_size // line_size
    -
    -            maxlines = num_lines + 4
    -
    -            for idx in range(4, maxlines):
    -                value = ''.join(random.choices(string.ascii_letters, k=62))
    -                fp.write(f'{idx},{value}{newline}')
    -
    -        # now if we run again, we should get a warning
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        with pytest.raises(CompilationException) as exc:
    -            self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
    -        assert '>1MB' in str(exc.value)
    -
    -        shutil.rmtree('./state')
    -        self.copy_state()
    -
    -        # once it's in path mode, we don't mark it as modified if it changes
    -        with open('seeds/seed.csv', 'a') as fp:
-            fp.write(f'{random.random()},test{newline}')
    -
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
    -        assert len(results) == 0
    -
    -    @use_profile('postgres')
    -    def test_postgres_changed_seed_config(self):
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
    -        assert len(results) == 0
    -
    -        self.use_default_project({'seeds': {'test': {'quote_columns': False}}})
    -
    -        # quoting change -> seed changed
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -    @use_profile('postgres')
    -    def test_postgres_unrendered_config_same(self):
    -        results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], expect_pass=True)
    -        assert len(results) == 0
    -
    -        # although this is the default value, dbt will recognize it as a change
    -        # for previously-unconfigured models, because it's been explicitly set
    -        self.use_default_project({'models': {'test': {'materialized': 'view'}}})
    -        results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.view_model'
    -
    -    @use_profile('postgres')
    -    def test_postgres_changed_model_contents(self):
    -        results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'])
    -        assert len(results) == 0
    -
    -        with open('models/table_model.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        with open('models/table_model.sql', 'w') as fp:
    -            fp.write("{{ config(materialized='table') }}")
    -            fp.write(newline)
    -            fp.write("select * from {{ ref('seed') }}")
    -            fp.write(newline)
    -
    -        results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0].node.name == 'table_model'
    -
    -    @use_profile('postgres')
    -    def test_postgres_new_macro(self):
    -        with open('macros/macros.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        new_macro = '{% macro my_other_macro() %}{% endmacro %}' + newline
    -
    -        # add a new macro to a new file
    -        with open('macros/second_macro.sql', 'w') as fp:
    -            fp.write(new_macro)
    -
    -        results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
    -        assert len(results) == 0
    -
    -        os.remove('macros/second_macro.sql')
    -        # add a new macro to the existing file
    -        with open('macros/macros.sql', 'a') as fp:
    -            fp.write(new_macro)
    -
    -        results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
    -        assert len(results) == 0
    -
    -    @use_profile('postgres')
    -    def test_postgres_changed_macro_contents(self):
    -        with open('macros/macros.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        # modify an existing macro
    -        with open('macros/macros.sql', 'w') as fp:
    -            fp.write("{% macro my_macro() %}")
    -            fp.write(newline)
    -            fp.write("    {% do log('in a macro', info=True) %}")
    -            fp.write(newline)
    -            fp.write('{% endmacro %}')
    -            fp.write(newline)
    -
    -        # table_model calls this macro
    -        results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
    -        assert len(results) == 1
    -
    -    @use_profile('postgres')
    -    def test_postgres_changed_exposure(self):
    -        with open('models/exposures.yml', 'a') as fp:
    -            fp.write('      name: John Doe\n')
    -
    -        results, stdout = self.run_dbt_and_capture(['run', '--models', '+state:modified', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0].node.name == 'view_model'
    -
    -    @use_profile('postgres')
    -    def test_postgres_previous_version_manifest(self):
    -        # This tests that a different schema version in the file throws an error
-        with self.assertRaises(IncompatibleSchemaException) as exc:
-            self.run_dbt(['ls', '-s', 'state:modified', '--state', './previous_state'])
-        self.assertEqual(exc.exception.CODE, 10014)
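
The >1MB behavior asserted in test_postgres_changed_seed_contents_state comes from how seed state is fingerprinted: small seeds are compared by a content hash, while oversized seeds fall back to path comparison, so later content edits no longer register as state:modified. A rough sketch under that assumption (the 1MiB threshold matches the '>1MB' warning above, but this is not dbt's code):

    import hashlib
    import os

    MAX_HASHED_SEED_BYTES = 1 * 1024 * 1024  # assumed threshold, per the '>1MB' warning

    def seed_state_key(path):
        # oversized seeds are keyed by path, so content changes go undetected
        if os.path.getsize(path) > MAX_HASHED_SEED_BYTES:
            return 'path::' + path
        with open(path, 'rb') as fp:
            return 'sha256::' + hashlib.sha256(fp.read()).hexdigest()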
    diff --git a/test/integration/062_defer_state_tests/test_run_results_state.py b/test/integration/062_defer_state_tests/test_run_results_state.py
    deleted file mode 100644
    index 4f59c6faa75..00000000000
    --- a/test/integration/062_defer_state_tests/test_run_results_state.py
    +++ /dev/null
    @@ -1,436 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile
    -import os
    -import random
    -import shutil
    -import string
    -
    -import pytest
    -
    -from dbt.exceptions import CompilationException
    -
    -
    -class TestRunResultsState(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "run_results_state_062"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'macro-paths': ['macros'],
    -            'seeds': {
    -                'test': {
    -                    'quote_columns': True,
    -                }
    -            }
    -        }
    -
    -    def _symlink_test_folders(self):
    -        # dbt's normal symlink behavior breaks this test. Copy the files
    -        # so we can freely modify them.
    -        for entry in os.listdir(self.test_original_source_path):
    -            src = os.path.join(self.test_original_source_path, entry)
    -            tst = os.path.join(self.test_root_dir, entry)
    -            if entry in {'models', 'seeds', 'macros'}:
    -                shutil.copytree(src, tst)
    -            elif os.path.isdir(entry) or entry.endswith('.sql'):
    -                os.symlink(src, tst)
    -
    -    def copy_state(self):
    -        assert not os.path.exists('state')
    -        os.makedirs('state')
    -        shutil.copyfile('target/manifest.json', 'state/manifest.json')
    -        shutil.copyfile('target/run_results.json', 'state/run_results.json')
    -
    -    def setUp(self):
    -        super().setUp()
    -        self.run_dbt(['build'])
    -        self.copy_state()
    -    
    -    def rebuild_run_dbt(self, expect_pass=True):
    -        shutil.rmtree('./state')
    -        self.run_dbt(['build'], expect_pass=expect_pass)
    -        self.copy_state()
    -
    -    @use_profile('postgres')
    -    def test_postgres_seed_run_results_state(self):
    -        shutil.rmtree('./state')
    -        self.run_dbt(['seed'])
    -        self.copy_state()
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:success', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:success', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:success+', '--state', './state'])
    -        assert len(results) == 7
    -        assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
    -
    -        with open('seeds/seed.csv') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -        with open('seeds/seed.csv', 'a') as fp:
    -            fp.write(f'\"\'\'3,carl{newline}')
    -        shutil.rmtree('./state')
    -        self.run_dbt(['seed'], expect_pass=False)
    -        self.copy_state()
    -
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state'])
    -        assert len(results) == 7
    -        assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
    -
    -
    -        with open('seeds/seed.csv') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -        with open('seeds/seed.csv', 'a') as fp:
    -            # assume each line is ~2 bytes + len(name)
    -            target_size = 1*1024*1024
    -            line_size = 64
    -
    -            num_lines = target_size // line_size
    -
    -            maxlines = num_lines + 4
    -
    -            for idx in range(4, maxlines):
    -                value = ''.join(random.choices(string.ascii_letters, k=62))
    -                fp.write(f'{idx},{value}{newline}')
    -        shutil.rmtree('./state')
    -        self.run_dbt(['seed'], expect_pass=False)
    -        self.copy_state()
    -
    -        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.seed'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state'])
    -        assert len(results) == 7
    -        assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
    -
    -    @use_profile('postgres')
    -    def test_postgres_build_run_results_state(self):
    -        results = self.run_dbt(['build', '--select', 'result:error', '--state', './state'])
    -        assert len(results) == 0
    -
    -        with open('models/view_model.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        with open('models/view_model.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_error")
    -            fp.write(newline)
    -        
    -        self.rebuild_run_dbt(expect_pass=False)
    -
    -        results = self.run_dbt(['build', '--select', 'result:error', '--state', './state'], expect_pass=False)
    -        assert len(results) == 3
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'view_model', 'not_null_view_model_id','unique_view_model_id'}
    -
    -        results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state'])
    -        assert len(results) == 3
    -        assert set(results) == {'test.view_model', 'test.not_null_view_model_id', 'test.unique_view_model_id'}
    -
    -        results = self.run_dbt(['build', '--select', 'result:error+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 4
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'table_model','view_model', 'not_null_view_model_id','unique_view_model_id'}
    -
    -        results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state'])
    -        assert len(results) == 6 # includes exposure
    -        assert set(results) == {'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
    -
    -        # test failure on build tests
    -        # fail the unique test
    -        with open('models/view_model.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select 1 as id union all select 1 as id")
    -            fp.write(newline)
    -        
    -        self.rebuild_run_dbt(expect_pass=False)
    -
    -        results = self.run_dbt(['build', '--select', 'result:fail', '--state', './state'], expect_pass=False)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'unique_view_model_id'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:fail', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.unique_view_model_id'
    -
    -        results = self.run_dbt(['build', '--select', 'result:fail+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 2
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'table_model', 'unique_view_model_id'}
    -
    -        results = self.run_dbt(['ls', '--select', 'result:fail+', '--state', './state'])
    -        assert len(results) == 1
    -        assert set(results) == {'test.unique_view_model_id'}
    -
    -        # change the unique test severity from error to warn and reuse the same view_model.sql changes above
-        with open('models/schema.yml') as f:
-            filedata = f.read()
-        newdata = filedata.replace('error', 'warn')
-        with open('models/schema.yml', 'w') as f:
-            f.write(newdata)
    -
    -        self.rebuild_run_dbt(expect_pass=True)
    -
    -        results = self.run_dbt(['build', '--select', 'result:warn', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'unique_view_model_id'
    -
    -        results = self.run_dbt(['ls', '--select', 'result:warn', '--state', './state'])
    -        assert len(results) == 1
    -        assert results[0] == 'test.unique_view_model_id'
    -
    -        results = self.run_dbt(['build', '--select', 'result:warn+', '--state', './state'], expect_pass=True)
    -        assert len(results) == 2 # includes table_model to be run
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'table_model', 'unique_view_model_id'}
    -
    -        results = self.run_dbt(['ls', '--select', 'result:warn+', '--state', './state'])
    -        assert len(results) == 1
    -        assert set(results) == {'test.unique_view_model_id'}
    -
    -    @use_profile('postgres')
    -    def test_postgres_run_run_results_state(self):
    -        results = self.run_dbt(['run', '--select', 'result:success', '--state', './state'], expect_pass=True)
    -        assert len(results) == 2
    -        assert results[0].node.name == 'view_model'
    -        assert results[1].node.name == 'table_model'
    -        
    -        # clear state and rerun upstream view model to test + operator
    -        shutil.rmtree('./state')
    -        self.run_dbt(['run', '--select', 'view_model'], expect_pass=True)
    -        self.copy_state()
    -        results = self.run_dbt(['run', '--select', 'result:success+', '--state', './state'], expect_pass=True)
    -        assert len(results) == 2
    -        assert results[0].node.name == 'view_model'
    -        assert results[1].node.name == 'table_model'
    -
    -        # check we are starting from a place with 0 errors
    -        results = self.run_dbt(['run', '--select', 'result:error', '--state', './state'])
    -        assert len(results) == 0
    -        
    -        # force an error in the view model to test error and skipped states
    -        with open('models/view_model.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        with open('models/view_model.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_error")
    -            fp.write(newline)
    -        
    -        shutil.rmtree('./state')
    -        self.run_dbt(['run'], expect_pass=False)
    -        self.copy_state()
    -
    -        # test single result selector on error
    -        results = self.run_dbt(['run', '--select', 'result:error', '--state', './state'], expect_pass=False)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'view_model'
    -        
    -        # test + operator selection on error
    -        results = self.run_dbt(['run', '--select', 'result:error+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 2
    -        assert results[0].node.name == 'view_model'
    -        assert results[1].node.name == 'table_model'
    -
-        # single result selector on skipped. Expect this to pass because the underlying view was already built above
    -        results = self.run_dbt(['run', '--select', 'result:skipped', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'table_model'
    -
    -        # add a downstream model that depends on table_model for skipped+ selector
    -        with open('models/table_model_downstream.sql', 'w') as fp:
    -            fp.write("select * from {{ref('table_model')}}")
    -        
    -        shutil.rmtree('./state')
    -        self.run_dbt(['run'], expect_pass=False)
    -        self.copy_state()
    -
    -        results = self.run_dbt(['run', '--select', 'result:skipped+', '--state', './state'], expect_pass=True)
    -        assert len(results) == 2
    -        assert results[0].node.name == 'table_model'
    -        assert results[1].node.name == 'table_model_downstream'
    -    
    -    
    -    @use_profile('postgres')
    -    def test_postgres_test_run_results_state(self):
    -        # run passed nodes
    -        results = self.run_dbt(['test', '--select', 'result:pass', '--state', './state'], expect_pass=True)
    -        assert len(results) == 2
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'unique_view_model_id', 'not_null_view_model_id'}
    -        
    -        # run passed nodes with + operator
    -        results = self.run_dbt(['test', '--select', 'result:pass+', '--state', './state'], expect_pass=True)
    -        assert len(results) == 2
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'unique_view_model_id', 'not_null_view_model_id'}
    -
    -        # update view model to generate a failure case
    -        os.remove('./models/view_model.sql')
    -        with open('models/view_model.sql', 'w') as fp:
    -            fp.write("select 1 as id union all select 1 as id")
    -        
    -        self.rebuild_run_dbt(expect_pass=False)
    -
    -        # test with failure selector
    -        results = self.run_dbt(['test', '--select', 'result:fail', '--state', './state'], expect_pass=False)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'unique_view_model_id'
    -
    -        # test with failure selector and + operator
    -        results = self.run_dbt(['test', '--select', 'result:fail+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'unique_view_model_id'
    -
    -        # change the unique test severity from error to warn and reuse the same view_model.sql changes above
    -        with open('models/schema.yml', 'r+') as f:
    -            filedata = f.read()
-            newdata = filedata.replace('error', 'warn')
    -            f.seek(0)
    -            f.write(newdata)
    -            f.truncate()
    -        
    -        # rebuild - expect_pass = True because we changed the error to a warning this time around
    -        self.rebuild_run_dbt(expect_pass=True)
    -
    -        # test with warn selector
    -        results = self.run_dbt(['test', '--select', 'result:warn', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'unique_view_model_id'
    -
    -        # test with warn selector and + operator
    -        results = self.run_dbt(['test', '--select', 'result:warn+', '--state', './state'], expect_pass=True)
    -        assert len(results) == 1
    -        assert results[0].node.name == 'unique_view_model_id'
    -
    -
    -    @use_profile('postgres')
    -    def test_postgres_concurrent_selectors_run_run_results_state(self):
    -        results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state'])
    -        assert len(results) == 0
    -
    -        # force an error on a dbt model
    -        with open('models/view_model.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        with open('models/view_model.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_error")
    -            fp.write(newline)
    -        
    -        shutil.rmtree('./state')
    -        self.run_dbt(['run'], expect_pass=False)
    -        self.copy_state()
    -
    -        # modify another dbt model
    -        with open('models/table_model_modified_example.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_error")
    -            fp.write(newline)
    -        
    -        results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 3
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'view_model', 'table_model_modified_example', 'table_model'}
    -    
    -
    -    @use_profile('postgres')
    -    def test_postgres_concurrent_selectors_test_run_results_state(self):
    -        # create failure test case for result:fail selector
    -        os.remove('./models/view_model.sql')
    -        with open('./models/view_model.sql', 'w') as f:
    -            f.write('select 1 as id union all select 1 as id union all select null as id')
    -
    -        # run dbt build again to trigger test errors
    -        self.rebuild_run_dbt(expect_pass=False)
    -        
-        # get the failures from the run_results state, excluding one of the two failing tests
    -        results = self.run_dbt(['test', '--select', 'result:fail', '--exclude', 'not_null_view_model_id', '--state', './state'], expect_pass=False)
    -        assert len(results) == 1
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'unique_view_model_id'}
    -        
    -        
    -    @use_profile('postgres')
    -    def test_postgres_concurrent_selectors_build_run_results_state(self):
    -        results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state'])
    -        assert len(results) == 0
    -
    -        # force an error on a dbt model
    -        with open('models/view_model.sql') as fp:
    -            fp.readline()
    -            newline = fp.newlines
    -
    -        with open('models/view_model.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_error")
    -            fp.write(newline)
    -        
    -        self.rebuild_run_dbt(expect_pass=False)
    -
    -        # modify another dbt model
    -        with open('models/table_model_modified_example.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_error")
    -            fp.write(newline)
    -        
    -        results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 5
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'table_model_modified_example', 'view_model', 'table_model', 'not_null_view_model_id', 'unique_view_model_id'}
    -        
    -        # create failure test case for result:fail selector
    -        os.remove('./models/view_model.sql')
    -        with open('./models/view_model.sql', 'w') as f:
    -            f.write('select 1 as id union all select 1 as id')
    -
    -        # create error model case for result:error selector
    -        with open('./models/error_model.sql', 'w') as f:
    -            f.write('select 1 as id from not_exists')
    -        
    -        # create something downstream from the error model to rerun
    -        with open('./models/downstream_of_error_model.sql', 'w') as f:
-            f.write('select * from {{ ref("error_model") }}')
    -        
    -        # regenerate build state
    -        self.rebuild_run_dbt(expect_pass=False)
    -
    -        # modify model again to trigger the state:modified selector 
    -        with open('models/table_model_modified_example.sql', 'w') as fp:
    -            fp.write(newline)
    -            fp.write("select * from forced_another_error")
    -            fp.write(newline)
    -        
    -        results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', 'result:fail+', '--state', './state'], expect_pass=False)
    -        assert len(results) == 5
    -        nodes = set([elem.node.name for elem in results])
    -        assert nodes == {'error_model', 'downstream_of_error_model', 'table_model_modified_example', 'table_model', 'unique_view_model_id'}
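
The result:<status> selectors used throughout this file are resolved against the run_results.json artifact that copy_state() stashes alongside the manifest. A minimal sketch of that lookup (the artifact's results entries do carry unique_id and status; the selector wiring itself is simplified here):

    import json

    def nodes_with_status(run_results_path, status):
        # collect unique_ids of results whose status matches, e.g. 'error' or 'fail'
        with open(run_results_path) as fp:
            artifact = json.load(fp)
        return {r['unique_id'] for r in artifact['results'] if r['status'] == status}

    # e.g. nodes_with_status('state/run_results.json', 'fail')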
    diff --git a/test/integration/064_column_comments_tests/models/quote_model.sql b/test/integration/064_column_comments_tests/models/quote_model.sql
    deleted file mode 100644
    index 2255b4bd7f0..00000000000
    --- a/test/integration/064_column_comments_tests/models/quote_model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as {{ adapter.quote("2id") }}
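
This model quotes its column because 2id is not a valid unquoted SQL identifier. adapter.quote delegates to the adapter; for postgres the effect is double-quote wrapping, roughly:

    def quote(identifier: str) -> str:
        # postgres-style quoting: double embedded quotes, then wrap (illustrative sketch)
        return '"' + identifier.replace('"', '""') + '"'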
    diff --git a/test/integration/064_column_comments_tests/models/schema.yml b/test/integration/064_column_comments_tests/models/schema.yml
    deleted file mode 100644
    index 1e82165fabf..00000000000
    --- a/test/integration/064_column_comments_tests/models/schema.yml
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -version: 2
    -models:
    -  - name: quote_model
    -    description: "model to test column quotes and comments" 
    -    columns:
    -      - name: 2id
    -        description: "XXX My description"
    -        quote: true
    -
    diff --git a/test/integration/064_column_comments_tests/test_column_comments.py b/test/integration/064_column_comments_tests/test_column_comments.py
    deleted file mode 100644
    index bd94b642cb6..00000000000
    --- a/test/integration/064_column_comments_tests/test_column_comments.py
    +++ /dev/null
    @@ -1,43 +0,0 @@
    -import json
    -
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestColumnComment(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "column_comment_060"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'models': {
    -                'test': {
    -                    'materialized': 'table',
    -                    '+persist_docs': {
    -                        "relation": True,
    -                        "columns": True,
    -                    },
    -                }
    -            }
    -        }
    -
    -    def run_has_comments(self):
    -        self.run_dbt()
    -        self.run_dbt(['docs', 'generate'])
    -        with open('target/catalog.json') as fp:
    -            catalog_data = json.load(fp)
    -        assert 'nodes' in catalog_data
    -        assert len(catalog_data['nodes']) == 1
    -        column_node = catalog_data['nodes']['model.test.quote_model']
    -        column_comment = column_node['columns']['2id']['comment']
    -        assert column_comment.startswith('XXX')
    -
    -    @use_profile('postgres')
    -    def test_postgres_comments(self):
    -        self.run_has_comments()
    diff --git a/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml b/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml
    deleted file mode 100644
    index d56280a5577..00000000000
    --- a/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -
    -name: 'local_dep'
    -version: '1.0'
    -config-version: 2
    -
    -profile: 'default'
    -
    -model-paths: ["models"]
    -analysis-paths: ["analyses"]
    -test-paths: ["tests"]
    -seed-paths: ["seeds"]
    -macro-paths: ["macros"]
    -
    -require-dbt-version: '>=0.1.0'
    -
    -target-path: "target"  # directory which will store compiled SQL files
    -clean-targets:         # directories to be removed by `dbt clean`
    -    - "target"
    -    - "dbt_packages"
    -
    -
    -seeds:
    -  quote_columns: False
    diff --git a/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql b/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql
    deleted file mode 100644
    index 81e9a0faeef..00000000000
    --- a/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{% macro some_overridden_macro() -%}
    -100
    -{%- endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql b/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql
    deleted file mode 100644
    index 4b91aa0f2fa..00000000000
    --- a/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from {{ ref('seed') }}
    diff --git a/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml b/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml
    deleted file mode 100644
    index 3d804a7c153..00000000000
    --- a/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -version: 2
    -sources:
    -  - name: seed_source
    -    schema: "{{ var('schema_override', target.schema) }}"
    -    tables:
    -      - name: "seed"
    -        columns:
    -          - name: id
    -            tests:
    -              - unique
    diff --git a/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv b/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv
    deleted file mode 100644
    index 3ff3deb87eb..00000000000
    --- a/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -id
    -1
    diff --git a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql b/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql
    deleted file mode 100644
    index 0f64eb17c0d..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql
    +++ /dev/null
    @@ -1,19 +0,0 @@
    -{% test type_one(model) %}
    -
    -    select * from (
    -
    -        select * from {{ model }}
    -        union all
    -        select * from {{ ref('model_b') }}
    -
    -    ) as Foo
    -
    -{% endtest %}
    -
    -{% test type_two(model) %}
    -
    -    {{ config(severity = "WARN") }}
    -
    -    select * from {{ model }}
    -
    -{% endtest %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql b/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql
    deleted file mode 100644
    index ba5b53fa5a9..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql
    +++ /dev/null
    @@ -1,19 +0,0 @@
    -{% test type_one(model) %}
    -
    -    select * from (
    -
    -        select * from {{ model }}
    -        union all
    -        select * from {{ ref('model_b') }}
    -
    -    ) as Foo
    -
    -{% endtest %}
    -
    -{% test type_two(model) %}
    -
    -    {{ config(severity = "ERROR") }}
    -
    -    select * from {{ model }}
    -
    -{% endtest %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/customers.sql b/test/integration/068_partial_parsing_tests/test-files/customers.sql
    deleted file mode 100644
    index 98e19b557eb..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/customers.sql
    +++ /dev/null
    @@ -1,19 +0,0 @@
    -with source as (
    -
    -    select * from {{ source('seed_sources', 'raw_customers') }}
    -
    -),
    -
    -renamed as (
    -
    -    select
    -        id as customer_id,
    -        first_name,
    -        last_name,
    -        email
    -
    -    from source
    -
    -)
    -
    -select * from renamed
    diff --git a/test/integration/068_partial_parsing_tests/test-files/customers1.md b/test/integration/068_partial_parsing_tests/test-files/customers1.md
    deleted file mode 100644
    index bba48335825..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/customers1.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{% docs customer_table %}
    -
    -This table contains customer data
    -
    -{% enddocs %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/customers2.md b/test/integration/068_partial_parsing_tests/test-files/customers2.md
    deleted file mode 100644
    index f8306f34e49..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/customers2.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{% docs customer_table %}
    -
    -LOTS of customer data
    -
    -{% enddocs %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/empty_schema.yml b/test/integration/068_partial_parsing_tests/test-files/empty_schema.yml
    deleted file mode 100644
    index e69de29bb2d..00000000000
    diff --git a/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml b/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml
    deleted file mode 100644
    index 22817d2a9c7..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml
    +++ /dev/null
    @@ -1 +0,0 @@
    -version: 2
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml b/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml
    deleted file mode 100644
    index 2b5809b1cb9..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -version: 2
    -sources:
    -  - name: seed_sources
    -    schema: "{{ target.schema }}"
    -    database: "{{ env_var('ENV_VAR_DATABASE') }}"
    -    tables:
    -      - name: raw_customers
    -        columns:
    -          - name: id
    -            tests:
    -              - not_null:
    -                  severity: "{{ env_var('ENV_VAR_SEVERITY') }}"
    -              - unique
    -          - name: first_name
    -          - name: last_name
    -          - name: email
    -
    -
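    The sources fixture above resolves the database and a test severity from environment variables at parse time. dbt's env_var() returns the variable's value, falls back to an optional default, and errors when the variable is unset with no default; a rough Python equivalent of that lookup:

        import os

        def env_var(name, default=None):
            # Approximates dbt's env_var(): prefer the environment, then the
            # default, and fail loudly when neither is available.
            value = os.environ.get(name)
            if value is not None:
                return value
            if default is not None:
                return default
            raise RuntimeError(f"Env var required but not provided: '{name}'")

        os.environ["ENV_VAR_SEVERITY"] = "warn"
        print(env_var("ENV_VAR_SEVERITY"))         # warn
        print(env_var("ENV_VAR_DATABASE", "dbt"))  # dbt (default used)
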
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql
    deleted file mode 100644
    index 0bf3eda6c07..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{% macro do_something(foo2, bar2) %}
    -
    -    select
    -        '{{ foo2 }}' as foo2,
    -        '{{ bar2 }}' as bar2
    -
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml
    deleted file mode 100644
    index 8888f65237d..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -version: 2
    -macros:
    -    - name: do_something
    -      description: "This is a test macro"
    -      meta:
    -          some_key: "{{ env_var('ENV_VAR_SOME_KEY') }}"
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml
    deleted file mode 100644
    index b8112fea010..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -version: 2
    -
    -metrics:
    -
    -  - model: "ref('people')"
    -    name: number_of_people
    -    description: Total count of people
    -    label: "Number of people"
    -    calculation_method: count
    -    expression: "*"
    -    timestamp: created_at
    -    time_grains: [day, week, month]
    -    dimensions:
    -      - favorite_color
    -      - loves_dbt
    -    meta:
    -        my_meta: '{{ env_var("ENV_VAR_METRICS") }}'
    -
    -  - model: "ref('people')"
    -    name: collective_tenure
    -    description: Total number of years of team experience
    -    label: "Collective tenure"
    -    calculation_method: sum
    -    expression: tenure
    -    timestamp: created_at
    -    time_grains: [day]
    -    filters:
    -      - field: loves_dbt
    -        operator: is
    -        value: 'true'
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql
    deleted file mode 100644
    index a926d16d9d8..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select '{{ env_var('ENV_VAR_TEST') }}' as vartest
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql
    deleted file mode 100644
    index e1875231d2e..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 'blue' as fun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml
    deleted file mode 100644
    index 147b96de1b6..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -version: 2
    -models:
    -  - name: model_color
    -    columns:
    -      - name: fun
    -        tests:
    -          - unique:
    -              enabled: "{{ env_var('ENV_VAR_ENABLED', True) }}"
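    The enabled config above renders env_var('ENV_VAR_ENABLED', True) through Jinja, so the value can arrive as a string like "True" and has to be coerced back into a boolean before it can toggle the test on or off. A sketch of that coercion (illustrative only, not dbt's exact rules):

        def to_bool(rendered):
            # Coerce a rendered string like "True"/"false"/"0" to a bool.
            if isinstance(rendered, bool):
                return rendered
            text = str(rendered).strip().lower()
            if text in ("true", "1", "yes"):
                return True
            if text in ("false", "0", "no", ""):
                return False
            raise ValueError(f"cannot interpret {rendered!r} as a boolean")

        print(to_bool("True"))   # True
        print(to_bool("0"))      # False
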
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml
    deleted file mode 100644
    index f8cf1ed9d67..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      config:
    -        materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml
    deleted file mode 100644
    index b1f3f079f6a..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      config:
    -        materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
    -      tests:
    -        - check_color:
    -            column_name: fun
    -            color: "env_var('ENV_VAR_COLOR')"
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml
    deleted file mode 100644
    index 3b0409637db..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml
    +++ /dev/null
    @@ -1,21 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      config:
    -        materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
    -      tests:
    -        - check_color:
    -            column_name: fun
    -            color: "env_var('ENV_VAR_COLOR')"
    -
    -exposures:
    -  - name: proxy_for_dashboard
    -    description: "This is for the XXX dashboard"
    -    type: "dashboard"
    -    owner:
    -      name: "{{ env_var('ENV_VAR_OWNER') }}"
    -      email: "tester@dashboard.com"
    -    depends_on:
    -      - ref("model_color")
    -      - source("seed_sources", "raw_customers")
    diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml b/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml
    deleted file mode 100644
    index 9a44074728a..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: orders
    -    description: "Some order data"
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test.sql b/test/integration/068_partial_parsing_tests/test-files/generic_test.sql
    deleted file mode 100644
    index ca09beaadb7..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/generic_test.sql
    +++ /dev/null
    @@ -1,26 +0,0 @@
    -{% test is_odd(model, column_name) %}
    -
    -with validation as (
    -
    -    select
    -        {{ column_name }} as odd_field
    -
    -    from {{ model }}
    -
    -),
    -
    -validation_errors as (
    -
    -    select
    -        odd_field
    -
    -    from validation
    -    -- if this is true, then odd_field is actually even!
    -    where (odd_field % 2) = 0
    -
    -)
    -
    -select *
    -from validation_errors
    -
    -{% endtest %}
    \ No newline at end of file
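    A generic test like is_odd selects the rows that violate the assertion, so the test passes exactly when the query returns nothing. The same contract in a few lines of Python (is_odd_failures is a hypothetical helper):

        def is_odd_failures(rows, column_name):
            # Return the violating rows; dbt counts the test as passing
            # when this set is empty.
            return [r for r in rows if r[column_name] % 2 == 0]

        rows = [{"id": 1}, {"id": 3}, {"id": 4}]
        failures = is_odd_failures(rows, "id")
        print("pass" if not failures else f"fail: {failures}")
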
    diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql b/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql
    deleted file mode 100644
    index 5a3b611ff7a..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql
    +++ /dev/null
    @@ -1,26 +0,0 @@
    -{% test is_odd(model, column_name) %}
    -
    -with validation as (
    -
    -    select
    -        {{ column_name }} as odd_field2
    -
    -    from {{ model }}
    -
    -),
    -
    -validation_errors as (
    -
    -    select
    -        odd_field2
    -
    -    from validation
    -    -- if this is true, then odd_field is actually even!
    -    where (odd_field2 % 2) = 0
    -
    -)
    -
    -select *
    -from validation_errors
    -
    -{% endtest %}
    \ No newline at end of file
    diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml b/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml
    deleted file mode 100644
    index c8307bc1021..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: orders
    -    description: "Some order data"
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - is_odd
    diff --git a/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql b/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql
    deleted file mode 100644
    index 46c7a39ddaa..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -- custom macro
    -{% macro generate_schema_name(schema_name, node) %}
    -
    -    {{ schema_name }}_{{ target.schema }}
    -
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql b/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql
    deleted file mode 100644
    index 1bfddb9dadb..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -- custom macro xxxx
    -{% macro generate_schema_name(schema_name, node) %}
    -
    -    {{ schema_name }}_{{ target.schema }}
    -
    -{% endmacro %}
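    Both gsm_override fixtures replace generate_schema_name, the macro dbt consults to decide which schema a node materializes in. Stock dbt uses the target schema and appends a custom schema when one is configured; the override here always emits <custom>_<target> instead. A sketch of the two behaviors (assuming a custom schema is set when the override runs):

        def default_schema_name(custom, target_schema):
            # dbt's stock behavior: the target schema, with the custom
            # schema appended only when one is configured.
            return target_schema if custom is None else f"{target_schema}_{custom}"

        def overridden_schema_name(custom, target_schema):
            # The fixture's override: always "<custom>_<target>".
            return f"{custom}_{target_schema}"

        print(default_schema_name(None, "dev"))         # dev
        print(default_schema_name("staging", "dev"))    # dev_staging
        print(overridden_schema_name("staging", "dev")) # staging_dev
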
    diff --git a/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml b/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml
    deleted file mode 100644
    index cf221dec670..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -
    -version: 2
    -
    -models:
    -    - name: model_a
    -      tests:
    -        - type_one
    -        - type_two
    diff --git a/test/integration/068_partial_parsing_tests/test-files/macros.yml b/test/integration/068_partial_parsing_tests/test-files/macros.yml
    deleted file mode 100644
    index 9ee72fad0ea..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/macros.yml
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -version: 2
    -macros:
    -    - name: do_something
    -      description: "This is a test macro"
    diff --git a/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql b/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql
    deleted file mode 100644
    index 010a0c29a02..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql
    +++ /dev/null
    @@ -1,21 +0,0 @@
    -{%
    -    set metric_list = [
    -        metric('number_of_people'),
    -        metric('collective_tenure')
    -    ]
    -%}
    -
    -{% if not execute %}                                
    -        
    -    {% set metric_names = [] %}                                         
    -    {% for m in metric_list %}             
    -        {% do metric_names.append(m.metric_name) %}           
    -    {% endfor %}                                    
    -                                 
    -    -- this config does nothing, but it lets us check these values
    -    {{ config(metric_names = metric_names) }}       
    -                             
    -{% endif %}
    - 
    -
    -select 1 as fun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_a.sql b/test/integration/068_partial_parsing_tests/test-files/model_a.sql
    deleted file mode 100644
    index 3bd54a4c1b6..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_a.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as fun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_b.sql b/test/integration/068_partial_parsing_tests/test-files/model_b.sql
    deleted file mode 100644
    index 01f38b0698e..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_b.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as notfun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_color.sql b/test/integration/068_partial_parsing_tests/test-files/model_color.sql
    deleted file mode 100644
    index e1875231d2e..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_color.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 'blue' as fun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_four1.sql b/test/integration/068_partial_parsing_tests/test-files/model_four1.sql
    deleted file mode 100644
    index 97c5b226d8c..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_four1.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from {{ ref('model_three') }}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_four2.sql b/test/integration/068_partial_parsing_tests/test-files/model_four2.sql
    deleted file mode 100644
    index c38a4c9194f..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_four2.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select fun from {{ ref('model_one') }}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_one.sql b/test/integration/068_partial_parsing_tests/test-files/model_one.sql
    deleted file mode 100644
    index 3bd54a4c1b6..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_one.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as fun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three.sql b/test/integration/068_partial_parsing_tests/test-files/model_three.sql
    deleted file mode 100644
    index 45aa2b750f7..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_three.sql
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -{{ config(materialized='table') }}
    - 
    -with source_data as (
    - 
    -    select 1 as id
    -    union all
    -    select null as id
    - 
    -)
    - 
    -select *
    -from source_data
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql
    deleted file mode 100644
    index a338a2ef4d2..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -{{ config(materialized='table', enabled=False) }}
    - 
    -with source_data as (
    - 
    -    select 1 as id
    -    union all
    -    select null as id
    - 
    -)
    - 
    -select *
    -from source_data
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql
    deleted file mode 100644
    index 4d416ab516e..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -- Disabled model
    -{{ config(materialized='table', enabled=False) }}
    - 
    -with source_data as (
    - 
    -    select 1 as id
    -    union all
    -    select null as id
    - 
    -)
    - 
    -select *
    -from source_data
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql
    deleted file mode 100644
    index e2d2df486c5..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -{{ config(materialized='table') }}
    - 
    -with source_data as (
    -
    -    {#- This is model three #}
    - 
    -    select 1 as id
    -    union all
    -    select null as id
    - 
    -)
    - 
    -select *
    -from source_data
    diff --git a/test/integration/068_partial_parsing_tests/test-files/model_two.sql b/test/integration/068_partial_parsing_tests/test-files/model_two.sql
    deleted file mode 100644
    index 01f38b0698e..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/model_two.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as notfun
    diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml
    deleted file mode 100644
    index 36e5ce68a6e..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      description: "The first model"
    diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml
    deleted file mode 100644
    index 7c9a890a481..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      description: "The first model"
    -    - name: model_three
    -      description: "The third model"
    -      columns:
    -        - name: id
    -          tests:
    -            - unique
    diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml
    deleted file mode 100644
    index c9369126ffc..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      description: "The first model"
    -    - name: model_three
    -      description: "The third model"
    -      columns:
    -        - name: id
    -          tests:
    -            - not_null
    diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml
    deleted file mode 100644
    index 11e4468d248..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      description: "The first model"
    -    - name: model_three
    -      description: "The third model"
    -      tests:
    -          - unique
    -macros:
    -    - name: do_something
    -      description: "This is a test macro"
    diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml
    deleted file mode 100644
    index 8087615fe49..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      description: "The first model"
    -    - name: model_three
    -      description: "The third model"
    -      config:
    -        enabled: false
    -      columns:
    -        - name: id
    -          tests:
    -            - unique
    diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml
    deleted file mode 100644
    index e73ffcef1de..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -version: 2
    -
    -models:
    -    - name: model_one
    -      description: "The first model"
    -    - name: model_three
    -      description: "The third model"
    -      config:
    -        enabled: true
    -      columns:
    -        - name: id
    -          tests:
    -            - unique
    diff --git a/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql b/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql
    deleted file mode 100644
    index ec6959e9a68..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from customers
    diff --git a/test/integration/068_partial_parsing_tests/test-files/my_macro.sql b/test/integration/068_partial_parsing_tests/test-files/my_macro.sql
    deleted file mode 100644
    index 0bf3eda6c07..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/my_macro.sql
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{% macro do_something(foo2, bar2) %}
    -
    -    select
    -        '{{ foo2 }}' as foo2,
    -        '{{ bar2 }}' as bar2
    -
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql b/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql
    deleted file mode 100644
    index e64aafa5ab5..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{% macro do_something(foo2, bar2) %}
    -
    -    select
    -        'foo' as foo2,
    -        'var' as bar2
    -
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/my_metric.yml b/test/integration/068_partial_parsing_tests/test-files/my_metric.yml
    deleted file mode 100644
    index 521bc92290f..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/my_metric.yml
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -version: 2
    -metrics:
    -  - name: new_customers
    -    label: New Customers
    -    model: customers
    -    description: "The number of paid customers who are using the product"
    -    calculation_method: count
    -    expression: user_id
    -    timestamp: signup_date
    -    time_grains: [day, week, month]
    -    dimensions:
    -      - plan
    -      - country
    -    filters:
    -      - field: is_paying
    -        value: True
    -        operator: '='
    -    +meta:
    -        is_okr: True
    -    tags:
    -      - okrs
    -
    -  
    diff --git a/test/integration/068_partial_parsing_tests/test-files/my_test.sql b/test/integration/068_partial_parsing_tests/test-files/my_test.sql
    deleted file mode 100644
    index fbfb738bc9a..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/my_test.sql
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -select
    -   * from {{ ref('customers') }} where first_name = '{{ macro_something() }}'
    diff --git a/test/integration/068_partial_parsing_tests/test-files/orders.sql b/test/integration/068_partial_parsing_tests/test-files/orders.sql
    deleted file mode 100644
    index ef61d616cc1..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/orders.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id, 101 as user_id, 'pending' as status
    diff --git a/test/integration/068_partial_parsing_tests/test-files/people.sql b/test/integration/068_partial_parsing_tests/test-files/people.sql
    deleted file mode 100644
    index ce58d41a599..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/people.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -select 1 as id, 'Drew' as first_name, 'Banin' as last_name, 'yellow' as favorite_color, true as loves_dbt, 5 as tenure, current_timestamp as created_at
    -union all
    -select 1 as id, 'Jeremy' as first_name, 'Cohen' as last_name, 'indigo' as favorite_color, true as loves_dbt, 4 as tenure, current_timestamp as created_at
    diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml
    deleted file mode 100644
    index 99d31a4e632..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -version: 2
    -
    -metrics:
    -
    -  - model: "ref('people')"
    -    name: number_of_people
    -    description: Total count of people
    -    label: "Number of people"
    -    calculation_method: count
    -    expression: "*"
    -    timestamp: created_at
    -    time_grains: [day, week, month]
    -    dimensions:
    -      - favorite_color
    -      - loves_dbt
    -    meta:
    -        my_meta: 'testing'
    -
    -  - model: "ref('people')"
    -    name: collective_tenure
    -    description: Total number of years of team experience
    -    label: "Collective tenure"
    -    calculation_method: sum
    -    expression: tenure
    -    timestamp: created_at
    -    time_grains: [day]
    -    filters:
    -      - field: loves_dbt
    -        operator: is
    -        value: 'true'
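    These two metric definitions reduce to a plain count and a filtered sum, so the expected values are easy to compute by hand over rows shaped like the people fixture (the third row below is invented for illustration):

        people = [
            {"id": 1, "loves_dbt": True,  "tenure": 5},
            {"id": 2, "loves_dbt": True,  "tenure": 4},
            {"id": 3, "loves_dbt": False, "tenure": 2},
        ]

        # calculation_method: count, expression: "*"
        number_of_people = len(people)

        # calculation_method: sum, expression: tenure,
        # filtered to rows where loves_dbt is true
        collective_tenure = sum(p["tenure"] for p in people if p["loves_dbt"])

        print(number_of_people)   # 3
        print(collective_tenure)  # 9
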
    diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml
    deleted file mode 100644
    index 5f826e66e85..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -version: 2
    -
    -metrics:
    -
    -  - model: "ref('people')"
    -    name: number_of_people
    -    description: Total count of people
    -    label: "Number of people"
    -    calculation_method: count
    -    expression: "*"
    -    timestamp: created_at
    -    time_grains: [day, week, month]
    -    dimensions:
    -      - favorite_color
    -      - loves_dbt
    -    meta:
    -        my_meta: 'replaced'
    -
    -  - model: "ref('people')"
    -    name: collective_tenure
    -    description: Total number of years of team experience
    -    label: "Collective tenure"
    -    calculation_method: sum
    -    expression: tenure
    -    timestamp: created_at
    -    time_grains: [day]
    -    filters:
    -      - field: loves_dbt
    -        operator: is
    -        value: 'true'
    diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml
    deleted file mode 100644
    index b9c640591fc..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -version: 2
    -
    -metrics:
    -
    -  - model: "ref('people')"
    -    name: number_of_people
    -    description: Total count of people
    -    label: "Number of people"
    -    calculation_method: count
    -    expression: "*"
    -    timestamp: created_at
    -    time_grains: [day, week, month]
    -    dimensions:
    -      - favorite_color
    -      - loves_dbt
    -    meta:
    -        my_meta: 'replaced'
    diff --git a/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv b/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv
    deleted file mode 100644
    index 2315be73844..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -id,first_name,last_name,email
    -1,Michael,Perez,mperez0@chronoengine.com
    -2,Shawn,Mccoy,smccoy1@reddit.com
    -3,Kathleen,Payne,kpayne2@cargocollective.com
    -4,Jimmy,Cooper,jcooper3@cargocollective.com
    -5,Katherine,Rice,krice4@typepad.com
    -6,Sarah,Ryan,sryan5@gnu.org
    -7,Martin,Mcdonald,mmcdonald6@opera.com
    -8,Frank,Robinson,frobinson7@wunderground.com
    -9,Jennifer,Franklin,jfranklin8@mail.ru
    -10,Henry,Welch,hwelch9@list-manage.com
    diff --git a/test/integration/068_partial_parsing_tests/test-files/ref_override.sql b/test/integration/068_partial_parsing_tests/test-files/ref_override.sql
    deleted file mode 100644
    index cd16793d3c4..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/ref_override.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -- Macro to override ref
    -{% macro ref(modelname) %}
    -{% do return(builtins.ref(modelname)) %}
    -{% endmacro %}
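    The ref_override fixtures demonstrate the wrapper pattern: a project-level ref() macro intercepts every lookup and delegates to builtins.ref, leaving a single hook point for logging or rewriting. The same shape in Python (builtin_ref is a stand-in, not dbt's API):

        def builtin_ref(model_name):
            # Stand-in for the built-in ref(): resolve a model name to a
            # relation identifier.
            return f"analytics.{model_name}"

        def ref(model_name):
            # Project override that delegates to the built-in; any logging
            # or rewriting would slot in here before returning.
            return builtin_ref(model_name)

        print(ref("model_one"))   # analytics.model_one
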
    diff --git a/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql b/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql
    deleted file mode 100644
    index 2e8027d8e80..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -- Macro to override ref xxxx
    -{% macro ref(modelname) %}
    -{% do return(builtins.ref(modelname)) %}
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml b/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml
    deleted file mode 100644
    index 432b5e0efe3..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -version: 2
    -
    -sources:
    -  - name: seed_source
    -    description: "This is a source override"
    -    overrides: local_dep
    -    schema: "{{ var('schema_override', target.schema) }}"
    -    tables:
    -      - name: "seed"
    -        columns:
    -          - name: id
    -            tests:
    -              - unique
    -              - not_null
    diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml
    deleted file mode 100644
    index 30363115e09..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -version: 2
    -sources:
    -  - name: seed_sources
    -    schema: "{{ target.schema }}"
    -    tables:
    -      - name: raw_customers
    -        columns:
    -          - name: id
    -            tests:
    -              - not_null:
    -                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    -              - unique
    -          - name: first_name
    -          - name: last_name
    -          - name: email
    -
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml
    deleted file mode 100644
    index 5927952917f..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml
    +++ /dev/null
    @@ -1,29 +0,0 @@
    -version: 2
    -
    -sources:
    -  - name: seed_sources
    -    schema: "{{ target.schema }}"
    -    tables:
    -      - name: raw_customers
    -        columns:
    -          - name: id
    -            tests:
    -              - not_null:
    -                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    -              - unique
    -          - name: first_name
    -          - name: last_name
    -          - name: email
    -
    -exposures:
    -  - name: proxy_for_dashboard
    -    description: "This is for the XXX dashboard"
    -    type: "dashboard"
    -    owner:
    -      name: "Dashboard Tester"
    -      email: "tester@dashboard.com"
    -    depends_on:
    -      - ref("model_one")
    -      - ref("raw_customers")
    -      - source("seed_sources", "raw_customers")
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml
    deleted file mode 100644
    index 54133a9a2f5..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -version: 2
    -
    -sources:
    -  - name: seed_sources
    -    schema: "{{ target.schema }}"
    -    tables:
    -      - name: raw_customers
    -        columns:
    -          - name: id
    -            tests:
    -              - not_null:
    -                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    -              - unique
    -          - name: first_name
    -          - name: last_name
    -          - name: email
    -
    -exposures:
    -  - name: proxy_for_dashboard
    -    description: "This is for the XXX dashboard"
    -    type: "dashboard"
    -    owner:
    -      name: "Dashboard Tester"
    -      email: "tester@dashboard.com"
    -    depends_on:
    -      - ref("model_one")
    -      - source("seed_sources", "raw_customers")
    -
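    The schema-sources2/3 variants add and then prune the exposure's depends_on entries; compilation fails when an exposure still points at a node that no longer exists, which is exactly the state several harness steps construct on purpose. The check amounts to set membership; a sketch with invented IDs:

        def missing_dependencies(exposure_depends_on, manifest_nodes):
            # Every depends_on entry must resolve to a node that still
            # exists in the manifest; anything left over is an error.
            return [d for d in exposure_depends_on if d not in manifest_nodes]

        nodes = {"model.test.model_one", "source.test.seed_sources.raw_customers"}
        deps = ["model.test.model_one", "seed.test.raw_customers"]
        print(missing_dependencies(deps, nodes))  # ['seed.test.raw_customers']
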
    diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml
    deleted file mode 100644
    index af76a0f315a..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -version: 2
    -
    -sources:
    -  - name: seed_sources
    -    schema: "{{ target.schema }}"
    -    tables:
    -      - name: raw_customers
    -        columns:
    -          - name: id
    -            tests:
    -              - not_null:
    -                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    -              - unique
    -              - every_value_is_blue
    -          - name: first_name
    -          - name: last_name
    -          - name: email
    -
    -seeds:
    -  - name: raw_customers
    -    description: "Raw customer data"
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -      - name: first_name
    -      - name: last_name
    -      - name: email
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml
    deleted file mode 100644
    index 57818771b71..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml
    +++ /dev/null
    @@ -1,29 +0,0 @@
    -version: 2
    -
    -sources:
    -  - name: seed_sources
    -    schema: "{{ target.schema }}"
    -    tables:
    -      - name: raw_customers
    -        columns:
    -          - name: id
    -            tests:
    -              - not_null:
    -                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    -              - unique
    -          - name: first_name
    -          - name: last_name
    -          - name: email
    -
    -seeds:
    -  - name: rad_customers
    -    description: "Raw customer data"
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -      - name: first_name
    -      - name: last_name
    -      - name: email
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/snapshot.sql b/test/integration/068_partial_parsing_tests/test-files/snapshot.sql
    deleted file mode 100644
    index c82a2fa5906..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/snapshot.sql
    +++ /dev/null
    @@ -1,29 +0,0 @@
    -{% snapshot orders_snapshot %}
    -
    -{{
    -    config(
    -      target_schema=schema,
    -      strategy='check',
    -      unique_key='id',
    -      check_cols=['status'],
    -    )
    -}}
    -
    -select * from {{ ref('orders') }}
    -
    -{% endsnapshot %}
    -
    -{% snapshot orders2_snapshot %}
    -
    -{{
    -    config(
    -      target_schema=schema,
    -      strategy='check',
    -      unique_key='id',
    -      check_cols=['order_date'],
    -    )
    -}}
    -
    -select * from {{ ref('orders') }}
    -
    -{% endsnapshot %}
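    Both snapshots use strategy='check': a row counts as changed when any of its check_cols differ from the previously captured version, matched on unique_key. A minimal sketch of that comparison (illustrative, not dbt's snapshot materialization):

        def changed_rows(old, new, unique_key, check_cols):
            # Compare only the configured columns between the prior
            # snapshot and the current rows, keyed by the unique key.
            old_by_key = {r[unique_key]: r for r in old}
            return [
                r for r in new
                if r[unique_key] in old_by_key
                and any(r[c] != old_by_key[r[unique_key]][c] for c in check_cols)
            ]

        old = [{"id": 1, "status": "pending"}]
        new = [{"id": 1, "status": "shipped"}]
        print(changed_rows(old, new, "id", ["status"]))
        # -> [{'id': 1, 'status': 'shipped'}]
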
    diff --git a/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql b/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql
    deleted file mode 100644
    index 27d320618c9..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -- add a comment
    -{% snapshot orders_snapshot %}
    -
    -{{
    -    config(
    -      target_schema=schema,
    -      strategy='check',
    -      unique_key='id',
    -      check_cols=['status'],
    -    )
    -}}
    -
    -select * from {{ ref('orders') }}
    -
    -{% endsnapshot %}
    -
    -{% snapshot orders2_snapshot %}
    -
    -{{
    -    config(
    -      target_schema=schema,
    -      strategy='check',
    -      unique_key='id',
    -      check_cols=['order_date'],
    -    )
    -}}
    -
    -select * from {{ ref('orders') }}
    -
    -{% endsnapshot %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql b/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql
    deleted file mode 100644
    index dd8710f0556..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -
    -{% test every_value_is_blue(model, column_name) %}
    -
    -    select *
    -    from {{ model }}
    -    where {{ column_name }} = 9999
    -
    -{% endtest %}
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql b/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql
    deleted file mode 100644
    index 3abcf30a658..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -
    -{% test every_value_is_blue(model, column_name) %}
    -
    -    select *
    -    from {{ model }}
    -    where {{ column_name }} != 99
    -
    -{% endtest %}
    -
    diff --git a/test/integration/068_partial_parsing_tests/test-files/test-macro.sql b/test/integration/068_partial_parsing_tests/test-files/test-macro.sql
    deleted file mode 100644
    index f2b1ecfc86b..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/test-macro.sql
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{% macro macro_something() %}
    -
    -    {% do return('macro_something') %}
    -
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql b/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql
    deleted file mode 100644
    index 52b4469cd01..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -{% macro macro_something() %}
    -
    -    {% do return('some_name') %}
    -
    -{% endmacro %}
    diff --git a/test/integration/068_partial_parsing_tests/test-files/test_color.sql b/test/integration/068_partial_parsing_tests/test-files/test_color.sql
    deleted file mode 100644
    index 0bb1cdcd96c..00000000000
    --- a/test/integration/068_partial_parsing_tests/test-files/test_color.sql
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{% test check_color(model, column_name, color) %}
    -
    -    select *
    -    from {{ model }}
    -    where {{ column_name }} = '{{ color }}'
    -
    -{% endtest %}
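    The 578-line harness deleted below drives partial parsing by mutating a live project between invocations: copy a fixture in, run dbt with --partial-parse, assert on the manifest, delete the fixture, repeat. Helpers along these lines are all it takes (a sketch; the real copy_file/rm_file live on DBTIntegrationTest):

        import os
        import shutil

        def copy_file(src, dst):
            # Drop a fixture into the live project so the next dbt
            # invocation sees a new or modified file.
            os.makedirs(os.path.dirname(dst) or ".", exist_ok=True)
            shutil.copyfile(src, dst)

        def rm_file(path):
            # Remove it again so the next invocation sees a deletion.
            os.remove(path)
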
    diff --git a/test/integration/068_partial_parsing_tests/test_partial_parsing.py b/test/integration/068_partial_parsing_tests/test_partial_parsing.py
    deleted file mode 100644
    index 648abdc4657..00000000000
    --- a/test/integration/068_partial_parsing_tests/test_partial_parsing.py
    +++ /dev/null
    @@ -1,578 +0,0 @@
    -from dbt.exceptions import CompilationException, ParsingException
    -from dbt.contracts.graph.manifest import Manifest
    -from dbt.contracts.files import ParseFileType
    -from dbt.contracts.results import TestStatus
    -from dbt.parser.partial import special_override_macros
    -from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest
    -import shutil
    -import os
    -
    -
    -# Note: every test case needs to have separate directories, otherwise
    -# they will interfere with each other when tests are multi-threaded
    -
    -class BasePPTest(DBTIntegrationTest):
    -
    -    @property
    -    def schema(self):
    -        return "test_068A"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seed-paths': ['seeds'],
    -            'test-paths': ['tests'],
    -            'macro-paths': ['macros'],
    -            'analysis-paths': ['analyses'],
    -            'snapshot-paths': ['snapshots'],
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -        }
    -
    -    def setup_directories(self):
    -        # Create the directories for the test in the `self.test_root_dir`
    -        # directory after everything else is symlinked. We can copy to and
    -        # delete files in this directory without tests interfering with each other.
    -        os.mkdir(os.path.join(self.test_root_dir, 'models'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'tests'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'tests', 'generic'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'seeds'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'macros'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'analyses'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'snapshots'))
    -        os.environ['DBT_PP_TEST'] = 'true'
    -
    -
    -
    -class ModelTest(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_pp_models(self):
    -        self.setup_directories()
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        # initial run
    -        self.run_dbt(['clean'])
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -
    -        # add a model file
    -        self.copy_file('test-files/model_two.sql', 'models/model_two.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -
    -        # add a schema file
    -        self.copy_file('test-files/models-schema1.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        self.assertIn('model.test.model_one', manifest.nodes)
    -        model_one_node = manifest.nodes['model.test.model_one']
    -        self.assertEqual(model_one_node.description, 'The first model')
    -        self.assertEqual(model_one_node.patch_path, 'test://' + normalize('models/schema.yml'))
    -
    -        # add a model and a schema file (with a test) at the same time
    -        self.copy_file('test-files/models-schema2.yml', 'models/schema.yml')
    -        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
    -        results = self.run_dbt(["--partial-parse", "test"], expect_pass=False)
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        project_files = [f for f in manifest.files if f.startswith('test://')]
    -        self.assertEqual(len(project_files), 4)
    -        model_3_file_id = 'test://' + normalize('models/model_three.sql')
    -        self.assertIn(model_3_file_id, manifest.files)
    -        model_three_file = manifest.files[model_3_file_id]
    -        self.assertEqual(model_three_file.parse_file_type, ParseFileType.Model)
    -        self.assertEqual(type(model_three_file).__name__, 'SourceFile')
    -        model_three_node = manifest.nodes[model_three_file.nodes[0]]
    -        schema_file_id = 'test://' + normalize('models/schema.yml')
    -        self.assertEqual(model_three_node.patch_path, schema_file_id)
    -        self.assertEqual(model_three_node.description, 'The third model')
    -        schema_file = manifest.files[schema_file_id]
    -        self.assertEqual(type(schema_file).__name__, 'SchemaSourceFile')
    -        self.assertEqual(len(schema_file.tests), 1)
    -        tests = schema_file.get_all_test_ids()
    -        self.assertEqual(tests, ['test.test.unique_model_three_id.6776ac8160'])
    -        unique_test_id = tests[0]
    -        self.assertIn(unique_test_id, manifest.nodes)
    -
    -        # modify model sql file, ensure description still there
    -        self.copy_file('test-files/model_three_modified.sql', 'models/model_three.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        model_id = 'model.test.model_three'
    -        self.assertIn(model_id, manifest.nodes)
    -        model_three_node = manifest.nodes[model_id]
    -        self.assertEqual(model_three_node.description, 'The third model')
    -
    -        # Change the model 3 test from unique to not_null
    -        self.copy_file('test-files/models-schema2b.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "test"], expect_pass=False)
    -        manifest = get_manifest()
    -        schema_file_id = 'test://' + normalize('models/schema.yml')
    -        schema_file = manifest.files[schema_file_id]
    -        tests = schema_file.get_all_test_ids()
    -        self.assertEqual(tests, ['test.test.not_null_model_three_id.3162ce0a6f'])
    -        not_null_test_id = tests[0]
    -        self.assertIn(not_null_test_id, manifest.nodes.keys())
    -        self.assertNotIn(unique_test_id, manifest.nodes.keys())
    -        self.assertEqual(len(results), 1)
    -
    -        # go back to previous version of schema file, removing patch, test, and model for model three
    -        self.copy_file('test-files/models-schema1.yml', 'models/schema.yml')
    -        self.rm_file(normalize('models/model_three.sql'))
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -
    -        # remove schema file, still have 3 models
    -        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
    -        self.rm_file(normalize('models/schema.yml'))
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -        manifest = get_manifest()
    -        schema_file_id = 'test://' + normalize('models/schema.yml')
    -        self.assertNotIn(schema_file_id, manifest.files)
    -        project_files = [f for f in manifest.files if f.startswith('test://')]
    -        self.assertEqual(len(project_files), 3)
    -
    -        # Put schema file back and remove a model
    -        # referred to in schema file
    -        self.copy_file('test-files/models-schema2.yml', 'models/schema.yml')
    -        self.rm_file('models/model_three.sql')
    -        with self.assertRaises(CompilationException):
    -            results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
    -
    -        # Put model back again
    -        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Add model four refing model three
    -        self.copy_file('test-files/model_four1.sql', 'models/model_four.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 4)
    -
    -        # Remove model_three and change model_four to ref model_one
    -        # and change schema file to remove model_three
    -        self.rm_file('models/model_three.sql')
    -        self.copy_file('test-files/model_four2.sql', 'models/model_four.sql')
    -        self.copy_file('test-files/models-schema1.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Remove model four, put back model three, put back schema file
    -        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
    -        self.copy_file('test-files/models-schema2.yml', 'models/schema.yml')
    -        self.rm_file('models/model_four.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # disable model three in the schema file
    -        self.copy_file('test-files/models-schema4.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -
    -        # update enabled config to be true for model three in the schema file
    -        self.copy_file('test-files/models-schema4b.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # disable model three in the schema file again
    -        self.copy_file('test-files/models-schema4.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -
    -        # remove disabled config for model three in the schema file to check it gets enabled
    -        self.copy_file('test-files/models-schema3.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Add a macro
    -        self.copy_file('test-files/my_macro.sql', 'macros/my_macro.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -        manifest = get_manifest()
    -        macro_id = 'macro.test.do_something'
    -        self.assertIn(macro_id, manifest.macros)
    -
    -        # Modify the macro
    -        self.copy_file('test-files/my_macro2.sql', 'macros/my_macro.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Add a macro patch
    -        self.copy_file('test-files/models-schema3.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Remove the macro
    -        self.rm_file('macros/my_macro.sql')
    -        with self.assertRaises(CompilationException):
    -            results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
    -
    -        # put back macro file, got back to schema file with no macro
    -        # add separate macro patch schema file
    -        self.copy_file('test-files/models-schema2.yml', 'models/schema.yml')
    -        self.copy_file('test-files/my_macro.sql', 'macros/my_macro.sql')
    -        self.copy_file('test-files/macros.yml', 'macros/macros.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # delete macro and schema file
    -        self.rm_file('macros/my_macro.sql')
    -        self.rm_file('macros/macros.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Add an empty schema file
    -        self.copy_file('test-files/empty_schema.yml', 'models/eschema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Add version to empty schema file
    -        self.copy_file('test-files/empty_schema_with_version.yml', 'models/eschema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -
    -        # Disable model_three
    -        self.copy_file('test-files/model_three_disabled.sql', 'models/model_three.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        model_id = 'model.test.model_three'
    -        self.assertIn(model_id, manifest.disabled)
    -        self.assertNotIn(model_id, manifest.nodes)
    -
    -        # Edit disabled model three
    -        self.copy_file('test-files/model_three_disabled2.sql', 'models/model_three.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        model_id = 'model.test.model_three'
    -        self.assertIn(model_id, manifest.disabled)
    -        self.assertNotIn(model_id, manifest.nodes)
    -
    -        # Remove disabled from model three
    -        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -        manifest = get_manifest()
    -        model_id = 'model.test.model_three'
    -        self.assertIn(model_id, manifest.nodes)
    -        self.assertNotIn(model_id, manifest.disabled)
    -
    -
    -class TestSources(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_pp_sources(self):
    -        self.setup_directories()
    -        # initial run
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        self.run_dbt(['clean'])
    -        self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv')
    -        self.copy_file('test-files/sources-tests1.sql', 'macros/tests.sql')
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -
    -        # Partial parse running 'seed'
    -        self.run_dbt(['--partial-parse', 'seed'])
    -        manifest = get_manifest()
    -        seed_file_id = 'test://' + normalize('seeds/raw_customers.csv')
    -        self.assertIn(seed_file_id, manifest.files)
    -
    -        # Add another seed file
    -        self.copy_file('test-files/raw_customers.csv', 'seeds/more_customers.csv')
    -        self.run_dbt(['--partial-parse', 'run'])
    -        seed_file_id = 'test://' + normalize('seeds/more_customers.csv')
    -        manifest = get_manifest()
    -        self.assertIn(seed_file_id, manifest.files)
    -        seed_id = 'seed.test.more_customers'
    -        self.assertIn(seed_id, manifest.nodes)
    -
    -        # Remove seed file and add a schema files with a source referring to raw_customers
    -        self.rm_file(normalize('seeds/more_customers.csv'))
    -        self.copy_file('test-files/schema-sources1.yml', 'models/sources.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.sources), 1)
    -        file_id = 'test://' + normalize('models/sources.yml')
    -        self.assertIn(file_id, manifest.files)
    -
    -        # add a model referring to raw_customers source
    -        self.copy_file('test-files/customers.sql', 'models/customers.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -
    -        # remove sources schema file
    -        self.rm_file(normalize('models/sources.yml'))
    -        with self.assertRaises(CompilationException):
    -            results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # put back sources and add an exposures file
    -        self.copy_file('test-files/schema-sources2.yml', 'models/sources.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # remove seed referenced in exposures file
    -        self.rm_file(normalize('seeds/raw_customers.csv'))
    -        with self.assertRaises(CompilationException):
    -            results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # put back seed and remove depends_on from exposure
    -        self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv')
    -        self.copy_file('test-files/schema-sources3.yml', 'models/sources.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # Add seed config with test to schema.yml, remove exposure
    -        self.copy_file('test-files/schema-sources4.yml', 'models/sources.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # Change seed name to wrong name
    -        self.copy_file('test-files/schema-sources5.yml', 'models/sources.yml')
    -        with self.assertRaises(CompilationException):
    -            results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
    -
    -        # Put back seed name to right name
    -        self.copy_file('test-files/schema-sources4.yml', 'models/sources.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # Add docs file customers.md
    -        self.copy_file('test-files/customers1.md', 'models/customers.md')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # Change docs file customers.md
    -        self.copy_file('test-files/customers2.md', 'models/customers.md')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # Delete docs file
    -        self.rm_file(normalize('models/customers.md'))
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # Add a data test
    -        self.copy_file('test-files/test-macro.sql', 'macros/test-macro.sql')
    -        self.copy_file('test-files/my_test.sql', 'tests/my_test.sql')
    -        results = self.run_dbt(["--partial-parse", "test"])
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.nodes), 9)
    -        test_id = 'test.test.my_test'
    -        self.assertIn(test_id, manifest.nodes)
    -
    -        # Change macro that data test depends on
    -        self.copy_file('test-files/test-macro2.sql', 'macros/test-macro.sql')
    -        results = self.run_dbt(["--partial-parse", "test"])
    -        manifest = get_manifest()
    -
    -        # Add an analysis
    -        self.copy_file('test-files/my_analysis.sql', 'analyses/my_analysis.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -
    -        # Remove data test
    -        self.rm_file(normalize('tests/my_test.sql'))
    -        results = self.run_dbt(["--partial-parse", "test"])
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.nodes), 9)
    -
    -        # Remove analysis
    -        self.rm_file(normalize('analyses/my_analysis.sql'))
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.nodes), 8)
    -
    -        # Change source test
    -        self.copy_file('test-files/sources-tests2.sql', 'macros/tests.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -
    -
    -class TestPartialParsingDependency(BasePPTest):
    -
    -    @property
    -    def packages_config(self):
    -        return {
    -            "packages": [
    -                {
    -                    'local': 'local_dependency'
    -                }
    -            ]
    -        }
    -
    -    @use_profile("postgres")
    -    def test_postgres_parsing_with_dependency(self):
    -        self.setup_directories()
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        self.run_dbt(["clean"])
    -        self.run_dbt(["deps"])
    -        self.run_dbt(["seed"])
    -        self.run_dbt(["run"])
    -
    -        # Add a source override
    -        self.copy_file('test-files/schema-models-c.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.sources), 1)
    -        source_id = 'source.local_dep.seed_source.seed'
    -        self.assertIn(source_id, manifest.sources)
    -        # We have 1 root model, 1 local_dep model, 1 local_dep seed, 1 local_dep source test, 2 root source tests
    -        self.assertEqual(len(manifest.nodes), 5)
    -        test_id = 'test.local_dep.source_unique_seed_source_seed_id.afa94935ed'
    -        test_node = manifest.nodes[test_id]
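-        # (the bare lookup doubles as an existence check: a missing id would raise KeyError)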
-
    -        # Remove a source override
    -        self.rm_file(normalize('models/schema.yml'))
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.sources), 1)
    -
    -
    -class TestMacros(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_nested_macros(self):
    -        self.setup_directories()
    -        self.copy_file('test-files/model_a.sql', 'models/model_a.sql')
    -        self.copy_file('test-files/model_b.sql', 'models/model_b.sql')
    -        self.copy_file('test-files/macros-schema.yml', 'models/schema.yml')
    -        self.copy_file('test-files/custom_schema_tests1.sql', 'macros/custom_schema_tests.sql')
    -        results = self.run_dbt()
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        macro_child_map = manifest.build_macro_child_map()
    -        macro_unique_id = 'macro.test.test_type_two'
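-        # (nothing is asserted on these two; presumably they just exercise the macro-child-map code path)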
    -
    -        results = self.run_dbt(['test'], expect_pass=False)
    -        results = sorted(results, key=lambda r: r.node.name)
    -        self.assertEqual(len(results), 2)
    -        # type_one_model_a_
    -        self.assertEqual(results[0].status, TestStatus.Fail)
    -        self.assertRegex(results[0].node.compiled_code, r'union all')
    -        # type_two_model_a_
    -        self.assertEqual(results[1].status, TestStatus.Warn)
    -        self.assertEqual(results[1].node.config.severity, 'WARN')
    -
    -        self.copy_file('test-files/custom_schema_tests2.sql', 'macros/custom_schema_tests.sql')
    -        results = self.run_dbt(["--partial-parse", "test"], expect_pass=False)
    -        manifest = get_manifest()
    -        test_node_id = 'test.test.type_two_model_a_.842bc6c2a7'
    -        self.assertIn(test_node_id, manifest.nodes)
    -        results = sorted(results, key=lambda r: r.node.name)
    -        self.assertEqual(len(results), 2)
    -        # type_two_model_a_
    -        self.assertEqual(results[1].status, TestStatus.Fail)
    -        self.assertEqual(results[1].node.config.severity, 'ERROR')
    -
    -    @use_profile('postgres')
    -    def test_postgres_skip_macros(self):
    -        expected_special_override_macros = [
    -            'ref', 'source', 'config', 'generate_schema_name',
    -            'generate_database_name', 'generate_alias_name'
    -        ]
    -        self.assertEqual(special_override_macros, expected_special_override_macros)
    -
    -        # initial run so we have a msgpack file
    -        self.setup_directories()
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        results = self.run_dbt()
    -
    -        # add a new ref override macro
    -        self.copy_file('test-files/ref_override.sql', 'macros/ref_override.sql')
    -        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
    -        self.assertTrue('Starting full parse.' in log_output)
    -
    -        # modify a ref override macro
    -        self.copy_file('test-files/ref_override2.sql', 'macros/ref_override.sql')
    -        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
    -        self.assertTrue('Starting full parse.' in log_output)
    -
    -        # remove a ref override macro
    -        self.rm_file(normalize('macros/ref_override.sql'))
    -        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
    -        self.assertTrue('Starting full parse.' in log_output)
    -
    -        # custom generate_schema_name macro
    -        self.copy_file('test-files/gsm_override.sql', 'macros/gsm_override.sql')
    -        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
    -        self.assertTrue('Starting full parse.' in log_output)
    -
    -        # change generate_schema_name macro
    -        self.copy_file('test-files/gsm_override2.sql', 'macros/gsm_override.sql')
    -        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
    -        self.assertTrue('Starting full parse.' in log_output)
    -
    -
    -class TestSnapshots(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_pp_snapshots(self):
    -
-        # initial run
-        self.setup_directories()
-        self.copy_file('test-files/orders.sql', 'models/orders.sql')
-        results = self.run_dbt()
    -        self.assertEqual(len(results), 1)
    -
    -        # add snapshot
    -        self.copy_file('test-files/snapshot.sql', 'snapshots/snapshot.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        snapshot_id = 'snapshot.test.orders_snapshot'
    -        self.assertIn(snapshot_id, manifest.nodes)
    -        snapshot2_id = 'snapshot.test.orders2_snapshot'
    -        self.assertIn(snapshot2_id, manifest.nodes)
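-        # the single snapshot.sql file defines both snapshots, hence two manifest nodes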
    -
    -        # run snapshot
    -        results = self.run_dbt(["--partial-parse", "snapshot"])
    -        self.assertEqual(len(results), 2)
    -
    -        # modify snapshot
    -        self.copy_file('test-files/snapshot2.sql', 'snapshots/snapshot.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 1)
    -
    -        # delete snapshot
    -        self.rm_file(normalize('snapshots/snapshot.sql'))
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 1)
    -
    -
    -class TestTests(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_pp_generic_tests(self):
    -
-        # initial run
    -        self.setup_directories()
    -        self.copy_file('test-files/orders.sql', 'models/orders.sql')
    -        self.copy_file('test-files/generic_schema.yml', 'models/schema.yml')
    -        results = self.run_dbt()
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e']
    -        self.assertCountEqual(expected_nodes, list(manifest.nodes.keys()))
    -
    -        # add generic test in test-path
    -        self.copy_file('test-files/generic_test.sql', 'tests/generic/generic_test.sql')
    -        self.copy_file('test-files/generic_test_schema.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        test_id = 'test.test.is_odd_orders_id.82834fdc5b'
    -        self.assertIn(test_id, manifest.nodes)
    -        expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e', 'test.test.is_odd_orders_id.82834fdc5b']
    -        self.assertCountEqual(expected_nodes, list(manifest.nodes.keys()))
    -
    -        # edit generic test in test-path
    -        self.copy_file('test-files/generic_test_edited.sql', 'tests/generic/generic_test.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        test_id = 'test.test.is_odd_orders_id.82834fdc5b'
    -        self.assertIn(test_id, manifest.nodes)
    -        expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e', 'test.test.is_odd_orders_id.82834fdc5b']
    -        self.assertCountEqual(expected_nodes, list(manifest.nodes.keys()))
    diff --git a/test/integration/068_partial_parsing_tests/test_pp_metrics.py b/test/integration/068_partial_parsing_tests/test_pp_metrics.py
    deleted file mode 100644
    index b9cbc69e3aa..00000000000
    --- a/test/integration/068_partial_parsing_tests/test_pp_metrics.py
    +++ /dev/null
    @@ -1,106 +0,0 @@
-from dbt.exceptions import CompilationException
-from test.integration.base import DBTIntegrationTest, use_profile, get_manifest
-import os
    -
    -
    -# Note: every test case needs to have separate directories, otherwise
    -# they will interfere with each other when tests are multi-threaded
    -
    -class BasePPTest(DBTIntegrationTest):
    -
    -    @property
    -    def schema(self):
    -        return "test_068A"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
-            'seed-paths': ['seeds'],
    -            'test-paths': ['tests'],
    -            'macro-paths': ['macros'],
    -            'analysis-paths': ['analyses'],
    -            'snapshot-paths': ['snapshots'],
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -        }
    -
    -    def setup_directories(self):
    -        # Create the directories for the test in the `self.test_root_dir`
    -        # directory after everything else is symlinked. We can copy to and
    -        # delete files in this directory without tests interfering with each other.
    -        os.mkdir(os.path.join(self.test_root_dir, 'models'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'tests'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'seeds'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'macros'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'analyses'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'snapshots'))
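-        # DBT_PP_TEST appears to relax dbt's partial-parse safety checks so these
-        # tests can exercise partial parsing (assumption based on the flag name)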
    -        os.environ['DBT_PP_TEST'] = 'true'
-
-
    -class MetricsTest(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_metrics(self):
    -        self.setup_directories()
    -        # initial run
    -        self.copy_file('test-files/people.sql', 'models/people.sql')
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.nodes), 1)
    -
    -        # Add metrics yaml file
    -        self.copy_file('test-files/people_metrics.yml', 'models/people_metrics.yml')
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        self.assertEqual(len(manifest.metrics), 2)
    -        metric_people_id = 'metric.test.number_of_people'
    -        metric_tenure_id = 'metric.test.collective_tenure'
    -        metric_people = manifest.metrics[metric_people_id]
    -        metric_tenure = manifest.metrics[metric_tenure_id]
    -        expected_meta = {'my_meta': 'testing'}
    -        self.assertEqual(metric_people.meta, expected_meta)
    -        self.assertEqual(metric_people.refs, [['people']])
    -        self.assertEqual(metric_tenure.refs, [['people']])
    -        expected_depends_on_nodes = ['model.test.people']
    -        self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes)
    -
    -        # Change metrics yaml files
    -        self.copy_file('test-files/people_metrics2.yml', 'models/people_metrics.yml')
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        metric_people = manifest.metrics[metric_people_id]
    -        expected_meta = {'my_meta': 'replaced'}
    -        self.assertEqual(metric_people.meta, expected_meta)
    -        expected_depends_on_nodes = ['model.test.people']
    -        self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes)
    -
    -        # Add model referring to metric
    -        self.copy_file('test-files/metric_model_a.sql', 'models/metric_model_a.sql')
    -        results = self.run_dbt(["run"])
    -        manifest = get_manifest()
    -        model_a = manifest.nodes['model.test.metric_model_a']
    -        expected_depends_on_nodes = ['metric.test.number_of_people', 'metric.test.collective_tenure']
    -        self.assertEqual(model_a.depends_on.nodes, expected_depends_on_nodes)
    -
    -        # Then delete a metric
    -        self.copy_file('test-files/people_metrics3.yml', 'models/people_metrics.yml')
    -        with self.assertRaises(CompilationException):
    -            # We use "parse" here and not "run" because we're checking that the CompilationException
-            # occurs at parse time, not at compile time
    -            results = self.run_dbt(["parse"])
    -
    diff --git a/test/integration/068_partial_parsing_tests/test_pp_vars.py b/test/integration/068_partial_parsing_tests/test_pp_vars.py
    deleted file mode 100644
    index e5f0752f6a9..00000000000
    --- a/test/integration/068_partial_parsing_tests/test_pp_vars.py
    +++ /dev/null
    @@ -1,416 +0,0 @@
-from dbt.exceptions import ParsingException
-from dbt.constants import SECRET_ENV_PREFIX
-from test.integration.base import DBTIntegrationTest, use_profile, get_manifest
-import os
    -
    -
    -# Note: every test case needs to have separate directories, otherwise
    -# they will interfere with each other when tests are multi-threaded
    -
    -class BasePPTest(DBTIntegrationTest):
    -
    -    @property
    -    def schema(self):
    -        return "test_068A"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -            'seed-paths': ['seeds'],
    -            'test-paths': ['tests'],
    -            'macro-paths': ['macros'],
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -        }
    -
    -    def setup_directories(self):
    -        # Create the directories for the test in the `self.test_root_dir`
    -        # directory after everything else is symlinked. We can copy to and
    -        # delete files in this directory without tests interfering with each other.
    -        os.mkdir(os.path.join(self.test_root_dir, 'models'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'tests'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'macros'))
    -        os.mkdir(os.path.join(self.test_root_dir, 'seeds'))
    -        os.environ['DBT_PP_TEST'] = 'true'
    -
    -
    -class EnvVarTest(BasePPTest):
    -
    -    @use_profile('postgres')
    -    def test_postgres_env_vars_models(self):
    -        self.setup_directories()
    -        self.copy_file('test-files/model_color.sql', 'models/model_color.sql')
    -        # initial run
    -        self.run_dbt(['clean'])
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -
-        # copy a file with an env_var call while the env var is unset
    -        self.copy_file('test-files/env_var_model.sql', 'models/env_var_model.sql')
    -        with self.assertRaises(ParsingException):
    -            results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # set the env var
    -        os.environ['ENV_VAR_TEST'] = 'TestingEnvVars'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        expected_env_vars = {"ENV_VAR_TEST": "TestingEnvVars"}
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -        model_id = 'model.test.env_var_model'
    -        model = manifest.nodes[model_id]
    -        model_created_at = model.created_at
    -
    -        # change the env var
    -        os.environ['ENV_VAR_TEST'] = 'second'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 2)
    -        manifest = get_manifest()
    -        expected_env_vars = {"ENV_VAR_TEST": "second"}
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
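-        # created_at changes because the env var change forces the model to be re-parsed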
    -        self.assertNotEqual(model_created_at, manifest.nodes[model_id].created_at)
    -
    -        # set an env_var in a schema file
    -        self.copy_file('test-files/env_var_schema.yml', 'models/schema.yml')
    -        self.copy_file('test-files/env_var_model_one.sql', 'models/model_one.sql')
    -        with self.assertRaises(ParsingException):
    -            results = self.run_dbt(["--partial-parse", "run"])
    -
    -        # actually set the env_var
    -        os.environ['TEST_SCHEMA_VAR'] = 'view'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"}
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -
    -        # env vars in a source
    -        os.environ['ENV_VAR_DATABASE'] = 'dbt'
    -        os.environ['ENV_VAR_SEVERITY'] = 'warn'
    -        self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv')
    -        self.copy_file('test-files/env_var-sources.yml', 'models/sources.yml')
    -        self.run_dbt(['--partial-parse', 'seed'])
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -        manifest = get_manifest()
    -        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "dbt", "ENV_VAR_SEVERITY": "warn"}
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -        self.assertEqual(len(manifest.sources), 1)
    -        source_id = 'source.test.seed_sources.raw_customers'
    -        source = manifest.sources[source_id]
    -        self.assertEqual(source.database, 'dbt')
    -        schema_file = manifest.files[source.file_id]
    -        test_id = 'test.test.source_not_null_seed_sources_raw_customers_id.e39ee7bf0d'
    -        test_node = manifest.nodes[test_id]
    -        self.assertEqual(test_node.config.severity, 'WARN')
    -
    -        # Change severity env var
    -        os.environ['ENV_VAR_SEVERITY'] = 'error'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "dbt", "ENV_VAR_SEVERITY": "error"}
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -        source_id = 'source.test.seed_sources.raw_customers'
    -        source = manifest.sources[source_id]
    -        schema_file = manifest.files[source.file_id]
    -        expected_schema_file_env_vars = {'sources': {'seed_sources': ['ENV_VAR_DATABASE', 'ENV_VAR_SEVERITY']}}
    -        self.assertEqual(expected_schema_file_env_vars, schema_file.env_vars)
    -        test_node = manifest.nodes[test_id]
    -        self.assertEqual(test_node.config.severity, 'ERROR')
    -
    -        # Change database env var
    -        os.environ['ENV_VAR_DATABASE'] = 'test_dbt'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "test_dbt", "ENV_VAR_SEVERITY": "error"}
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -        source = manifest.sources[source_id]
    -        self.assertEqual(source.database, 'test_dbt')
    -
    -        # Delete database env var
    -        del os.environ['ENV_VAR_DATABASE']
    -        with self.assertRaises(ParsingException):
    -            results = self.run_dbt(["--partial-parse", "run"])
    -        os.environ['ENV_VAR_DATABASE'] = 'test_dbt'
    -
    -        # Add generic test with test kwarg that's rendered late (no curly brackets)
    -        os.environ['ENV_VAR_DATABASE'] = 'dbt'
    -        self.copy_file('test-files/test_color.sql', 'macros/test_color.sql')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        # Add source test using test_color and an env_var for color
    -        self.copy_file('test-files/env_var_schema2.yml', 'models/schema.yml')
    -        with self.assertRaises(ParsingException):
    -            results = self.run_dbt(["--partial-parse", "run"])
    -        os.environ['ENV_VAR_COLOR'] = 'green'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        test_color_id = 'test.test.check_color_model_one_env_var_ENV_VAR_COLOR___fun.89638de387'
    -        test_node = manifest.nodes[test_color_id]
    -        # kwarg was rendered but not changed (it will be rendered again when compiled)
    -        self.assertEqual(test_node.test_metadata.kwargs['color'], "env_var('ENV_VAR_COLOR')")
    -        results = self.run_dbt(["--partial-parse", "test"])
    -
    -        # Add an exposure with an env_var
    -        os.environ['ENV_VAR_OWNER'] = "John Doe"
    -        self.copy_file('test-files/env_var_schema3.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        expected_env_vars = {
    -            "ENV_VAR_TEST": "second",
    -            "TEST_SCHEMA_VAR": "view",
    -            "ENV_VAR_DATABASE": "dbt",
    -            "ENV_VAR_SEVERITY": "error",
    -            "ENV_VAR_COLOR": 'green',
    -            "ENV_VAR_OWNER": "John Doe",
    -        }
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -        exposure = list(manifest.exposures.values())[0]
    -        schema_file = manifest.files[exposure.file_id]
    -        expected_sf_env_vars = {
    -            'models': {
    -                'model_one': ['TEST_SCHEMA_VAR', 'ENV_VAR_COLOR']
    -            },
    -            'exposures': {
    -                'proxy_for_dashboard': ['ENV_VAR_OWNER']
    -            }
    -        }
    -        self.assertEqual(expected_sf_env_vars, schema_file.env_vars)
    -
    -        # add a macro and a macro schema file
    -        os.environ['ENV_VAR_SOME_KEY'] = 'toodles'
    -        self.copy_file('test-files/env_var_macro.sql', 'macros/env_var_macro.sql')
    -        self.copy_file('test-files/env_var_macros.yml', 'macros/env_var_macros.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        expected_env_vars = {
    -            "ENV_VAR_TEST": "second",
    -            "TEST_SCHEMA_VAR": "view",
    -            "ENV_VAR_DATABASE": "dbt",
    -            "ENV_VAR_SEVERITY": "error",
    -            "ENV_VAR_COLOR": 'green',
    -            "ENV_VAR_OWNER": "John Doe",
    -            "ENV_VAR_SOME_KEY": "toodles",
    -        }
    -        self.assertEqual(expected_env_vars, manifest.env_vars)
    -        macro_id = 'macro.test.do_something'
    -        macro = manifest.macros[macro_id]
    -        self.assertEqual(macro.meta, {"some_key": "toodles"})
    -        # change the env var
    -        os.environ['ENV_VAR_SOME_KEY'] = 'dumdedum'
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        manifest = get_manifest()
    -        macro = manifest.macros[macro_id]
    -        self.assertEqual(macro.meta, {"some_key": "dumdedum"})
    -
    -        # Add a schema file with a test on model_color and env_var in test enabled config
    -        self.copy_file('test-files/env_var_model_test.yml', 'models/schema.yml')
    -        results = self.run_dbt(["--partial-parse", "run"])
    -        self.assertEqual(len(results), 3)
    -        manifest = get_manifest()
    -        model_color = manifest.nodes['model.test.model_color']
    -        schema_file = manifest.files[model_color.patch_path]
-        expected_env_vars = {
-            'models': {
-                'model_one': ['TEST_SCHEMA_VAR', 'ENV_VAR_COLOR'],
-                'model_color': ['ENV_VAR_ENABLED']
-            },
-            'exposures': {
-                'proxy_for_dashboard': ['ENV_VAR_OWNER']
-            }
-        }
    -        self.assertEqual(expected_env_vars, schema_file.env_vars)
    -
    -        # Add a metrics file with env_vars
    -        os.environ['ENV_VAR_METRICS'] = 'TeStInG'
    -        self.copy_file('test-files/people.sql', 'models/people.sql')
    -        self.copy_file('test-files/env_var_metrics.yml', 'models/metrics.yml')
    -        results = self.run_dbt(["run"])
    -        manifest = get_manifest()
    -        self.assertIn('ENV_VAR_METRICS', manifest.env_vars)
    -        self.assertEqual(manifest.env_vars['ENV_VAR_METRICS'], 'TeStInG')
    -        metric_node = manifest.metrics['metric.test.number_of_people']
    -        self.assertEqual(metric_node.meta, {'my_meta': 'TeStInG'})
    -
    -        # Change metrics env var
    -        os.environ['ENV_VAR_METRICS'] = 'Changed!'
    -        results = self.run_dbt(["run"])
    -        manifest = get_manifest()
    -        metric_node = manifest.metrics['metric.test.number_of_people']
    -        self.assertEqual(metric_node.meta, {'my_meta': 'Changed!'})
    -
-        # delete the env vars to clean up
    -        del os.environ['ENV_VAR_TEST']
    -        del os.environ['ENV_VAR_SEVERITY']
    -        del os.environ['ENV_VAR_DATABASE']
    -        del os.environ['TEST_SCHEMA_VAR']
    -        del os.environ['ENV_VAR_COLOR']
    -        del os.environ['ENV_VAR_SOME_KEY']
    -        del os.environ['ENV_VAR_OWNER']
    -        del os.environ['ENV_VAR_METRICS']
    -
    -
    -class ProjectEnvVarTest(BasePPTest):
    -
    -    @property
    -    def project_config(self):
    -        # Need to set the environment variable here initially because
    -        # the unittest setup does a load_config.
    -        os.environ['ENV_VAR_NAME'] = "Jane Smith"
    -        return {
    -            'config-version': 2,
    -            'seed-paths': ['seeds'],
    -            'test-paths': ['tests'],
    -            'macro-paths': ['macros'],
    -            'seeds': {
    -                'quote_columns': False,
    -            },
    -            'models': {
    -                '+meta': {
    -                    'meta_name': "{{ env_var('ENV_VAR_NAME') }}"
    -                }
    -            }
    -        }
    -
    -    @use_profile('postgres')
    -    def test_postgres_project_env_vars(self):
    -
    -        # Initial run
    -        self.setup_directories()
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        self.run_dbt(['clean'])
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        state_check = manifest.state_check
    -        model_id = 'model.test.model_one'
    -        model = manifest.nodes[model_id]
    -        self.assertEqual(model.config.meta['meta_name'], 'Jane Smith')
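-        # capture the hash so we can assert below that changing the env var changes it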
    -        env_vars_hash_checksum = state_check.project_env_vars_hash.checksum
    -
    -        # Change the environment variable
    -        os.environ['ENV_VAR_NAME'] = "Jane Doe"
    -        results = self.run_dbt(["run"])
    -        self.assertEqual(len(results), 1)
    -        manifest = get_manifest()
    -        model = manifest.nodes[model_id]
    -        self.assertEqual(model.config.meta['meta_name'], 'Jane Doe')
    -        self.assertNotEqual(env_vars_hash_checksum, manifest.state_check.project_env_vars_hash.checksum)
    -
    -        # cleanup
    -        del os.environ['ENV_VAR_NAME']
    -
    -
    -class ProfileEnvVarTest(BasePPTest):
    -
    -    @property
    -    def profile_config(self):
    -        # Need to set these here because the base integration test class
    -        # calls 'load_config' before the tests are run.
    -        # Note: only the specified profile is rendered, so there's no
-        # point in setting env_vars in unused profiles.
    -        os.environ['ENV_VAR_USER'] = 'root'
    -        os.environ['ENV_VAR_PASS'] = 'password'
    -        return {
    -            'config': {
    -                'send_anonymous_usage_stats': False
    -            },
    -            'test': {
    -                'outputs': {
    -                    'dev': {
    -                        'type': 'postgres',
    -                        'threads': 1,
    -                        'host': self.database_host,
    -                        'port': 5432,
    -                        'user': "root",
    -                        'pass': "password",
    -                        'user': "{{ env_var('ENV_VAR_USER') }}",
    -                        'pass': "{{ env_var('ENV_VAR_PASS') }}",
    -                        'dbname': 'dbt',
    -                        'schema': self.unique_schema()
    -                    },
    -                },
    -                'target': 'dev'
    -            }
    -        }
    -
    -    @use_profile('postgres')
    -    def test_postgres_profile_env_vars(self):
    -
    -        # Initial run
    -        os.environ['ENV_VAR_USER'] = 'root'
    -        os.environ['ENV_VAR_PASS'] = 'password'
    -        self.setup_directories()
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        results = self.run_dbt(["run"])
    -        manifest = get_manifest()
    -        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
    -
    -        # Change env_vars, the user doesn't exist, this should fail
    -        os.environ['ENV_VAR_USER'] = 'fake_user'
    -        (results, log_output) = self.run_dbt_and_capture(["run"], expect_pass=False)
    -        self.assertTrue('env vars used in profiles.yml have changed' in log_output)
    -        manifest = get_manifest()
    -        self.assertNotEqual(env_vars_checksum, manifest.state_check.profile_env_vars_hash.checksum)
    -
    -
    -class ProfileSecretEnvVarTest(BasePPTest):
    -
    -    @property
    -    def profile_config(self):
    -        # Need to set these here because the base integration test class
    -        # calls 'load_config' before the tests are run.
    -        # Note: only the specified profile is rendered, so there's no
-        # point in setting env_vars in unused profiles.
    -
-        # The user is secret and the password is not. Postgres on macOS doesn't care if the
-        # password changes, so we have to change the user. Related: https://github.com/dbt-labs/dbt-core/pull/4250
    -        os.environ[SECRET_ENV_PREFIX + 'USER'] = 'root'
    -        os.environ['ENV_VAR_PASS'] = 'password'
    -        return {
    -            'config': {
    -                'send_anonymous_usage_stats': False
    -            },
    -            'test': {
    -                'outputs': {
    -                    'dev': {
    -                        'type': 'postgres',
    -                        'threads': 1,
    -                        'host': self.database_host,
    -                        'port': 5432,
    -                        'user': "root",
    -                        'pass': "password",
    -                        'user': "{{ env_var('DBT_ENV_SECRET_USER') }}",
    -                        'pass': "{{ env_var('ENV_VAR_PASS') }}",
    -                        'dbname': 'dbt',
    -                        'schema': self.unique_schema()
    -                    },
    -                },
    -                'target': 'dev'
    -            }
    -        }
    -
    -    @use_profile('postgres')
    -    def test_postgres_profile_secret_env_vars(self):
    -
    -        # Initial run
    -        os.environ[SECRET_ENV_PREFIX + 'USER'] = 'root'
    -        os.environ['ENV_VAR_PASS'] = 'password'
    -        self.setup_directories()
    -        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
    -        results = self.run_dbt(["run"])
    -        manifest = get_manifest()
    -        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
    -
-        # Change a secret var; it shouldn't register, because secrets are never saved.
    -        os.environ[SECRET_ENV_PREFIX + 'USER'] = 'boop'
-        # this dbt run is going to fail because the user isn't actually the right one,
-        # but that doesn't matter because we just want to see whether the manifest has
-        # included the secret in the hash of environment variables.
    -        (results, log_output) = self.run_dbt_and_capture(["run"], expect_pass=False)
    -        # I020 is the event code for "env vars used in profiles.yml have changed"
-        self.assertFalse('I020' in log_output)
    -        manifest = get_manifest()
    -        self.assertEqual(env_vars_checksum, manifest.state_check.profile_env_vars_hash.checksum)
    -
    diff --git a/test/integration/069_build_tests/models-circular-relationship/model_0.sql b/test/integration/069_build_tests/models-circular-relationship/model_0.sql
    deleted file mode 100644
    index 2fe54b32418..00000000000
    --- a/test/integration/069_build_tests/models-circular-relationship/model_0.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('countries') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models-circular-relationship/model_1.sql b/test/integration/069_build_tests/models-circular-relationship/model_1.sql
    deleted file mode 100644
    index b11c0b7b7ed..00000000000
    --- a/test/integration/069_build_tests/models-circular-relationship/model_1.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('model_0') }}
    diff --git a/test/integration/069_build_tests/models-circular-relationship/model_99.sql b/test/integration/069_build_tests/models-circular-relationship/model_99.sql
    deleted file mode 100644
    index a680446bea0..00000000000
    --- a/test/integration/069_build_tests/models-circular-relationship/model_99.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select '1' as "num"
    -
    diff --git a/test/integration/069_build_tests/models-circular-relationship/test.yml b/test/integration/069_build_tests/models-circular-relationship/test.yml
    deleted file mode 100644
    index 991dde8a22a..00000000000
    --- a/test/integration/069_build_tests/models-circular-relationship/test.yml
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model_0
    -    columns:
    -      - name: iso3
    -        tests:
    -          - relationships:
    -              to: ref('model_1')
    -              field: iso3
    -
    -  - name: model_1
    -    columns:
    -      - name: iso3
    -        tests:
    -          - relationships:
    -              to: ref('model_0')
    -              field: iso3
    diff --git a/test/integration/069_build_tests/models-failing/model_0.sql b/test/integration/069_build_tests/models-failing/model_0.sql
    deleted file mode 100644
    index 2fe54b32418..00000000000
    --- a/test/integration/069_build_tests/models-failing/model_0.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('countries') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models-failing/model_1.sql b/test/integration/069_build_tests/models-failing/model_1.sql
    deleted file mode 100644
    index cc5cf86c1e4..00000000000
    --- a/test/integration/069_build_tests/models-failing/model_1.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select bad_column from {{ ref('snap_0') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models-failing/model_2.sql b/test/integration/069_build_tests/models-failing/model_2.sql
    deleted file mode 100644
    index 25bea5224cf..00000000000
    --- a/test/integration/069_build_tests/models-failing/model_2.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('snap_1') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models-failing/model_3.sql b/test/integration/069_build_tests/models-failing/model_3.sql
    deleted file mode 100644
    index bc0d81e14e5..00000000000
    --- a/test/integration/069_build_tests/models-failing/model_3.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('model_1') }}
    diff --git a/test/integration/069_build_tests/models-failing/model_99.sql b/test/integration/069_build_tests/models-failing/model_99.sql
    deleted file mode 100644
    index 38c103e823b..00000000000
    --- a/test/integration/069_build_tests/models-failing/model_99.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select '1' as "num"
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models-failing/test.yml b/test/integration/069_build_tests/models-failing/test.yml
    deleted file mode 100644
    index 6f9133aa487..00000000000
    --- a/test/integration/069_build_tests/models-failing/test.yml
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model_0
    -    columns:
    -      - name: iso3
    -        tests:
    -          - unique
    -          - not_null
    -  - name: model_2
    -    columns:
    -      - name: iso3
    -        tests:
    -          - unique
    -          - not_null
    diff --git a/test/integration/069_build_tests/models-interdependent/model_a.sql b/test/integration/069_build_tests/models-interdependent/model_a.sql
    deleted file mode 100644
    index 43258a71464..00000000000
    --- a/test/integration/069_build_tests/models-interdependent/model_a.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id
    diff --git a/test/integration/069_build_tests/models-interdependent/model_c.sql b/test/integration/069_build_tests/models-interdependent/model_c.sql
    deleted file mode 100644
    index 6b5ce07801a..00000000000
    --- a/test/integration/069_build_tests/models-interdependent/model_c.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from {{ ref('model_b') }}
    diff --git a/test/integration/069_build_tests/models-interdependent/schema.yml b/test/integration/069_build_tests/models-interdependent/schema.yml
    deleted file mode 100644
    index 1d3fe4a9bfa..00000000000
    --- a/test/integration/069_build_tests/models-interdependent/schema.yml
    +++ /dev/null
    @@ -1,41 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model_a
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -          - relationships:
    -              to: ref('model_b')
    -              field: id
    -          - relationships:
    -              to: ref('model_c')
    -              field: id
    -
    -  - name: model_b
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -          - relationships:
    -              to: ref('model_a')
    -              field: id
    -          - relationships:
    -              to: ref('model_c')
    -              field: id
    -
    -  - name: model_c
    -    columns:
    -      - name: id
    -        tests:
    -          - unique
    -          - not_null
    -          - relationships:
    -              to: ref('model_a')
    -              field: id
    -          - relationships:
    -              to: ref('model_b')
    -              field: id
    diff --git a/test/integration/069_build_tests/models-simple-blocking/model_a.sql b/test/integration/069_build_tests/models-simple-blocking/model_a.sql
    deleted file mode 100644
    index 23fa9a380d7..00000000000
    --- a/test/integration/069_build_tests/models-simple-blocking/model_a.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select null as id
    diff --git a/test/integration/069_build_tests/models-simple-blocking/model_b.sql b/test/integration/069_build_tests/models-simple-blocking/model_b.sql
    deleted file mode 100644
    index ad13bfaf538..00000000000
    --- a/test/integration/069_build_tests/models-simple-blocking/model_b.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select * from {{ ref('model_a') }}
    diff --git a/test/integration/069_build_tests/models-simple-blocking/schema.yml b/test/integration/069_build_tests/models-simple-blocking/schema.yml
    deleted file mode 100644
    index 92f1934fb25..00000000000
    --- a/test/integration/069_build_tests/models-simple-blocking/schema.yml
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model_a
    -    columns:
    -      - name: id
    -        tests:
    -          - not_null
    diff --git a/test/integration/069_build_tests/models/model_0.sql b/test/integration/069_build_tests/models/model_0.sql
    deleted file mode 100644
    index 2fe54b32418..00000000000
    --- a/test/integration/069_build_tests/models/model_0.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('countries') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models/model_1.sql b/test/integration/069_build_tests/models/model_1.sql
    deleted file mode 100644
    index d8efda2c3b2..00000000000
    --- a/test/integration/069_build_tests/models/model_1.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('snap_0') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models/model_2.sql b/test/integration/069_build_tests/models/model_2.sql
    deleted file mode 100644
    index 25bea5224cf..00000000000
    --- a/test/integration/069_build_tests/models/model_2.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('snap_1') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models/model_99.sql b/test/integration/069_build_tests/models/model_99.sql
    deleted file mode 100644
    index 38c103e823b..00000000000
    --- a/test/integration/069_build_tests/models/model_99.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select '1' as "num"
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/models/test.yml b/test/integration/069_build_tests/models/test.yml
    deleted file mode 100644
    index 6f9133aa487..00000000000
    --- a/test/integration/069_build_tests/models/test.yml
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model_0
    -    columns:
    -      - name: iso3
    -        tests:
    -          - unique
    -          - not_null
    -  - name: model_2
    -    columns:
    -      - name: iso3
    -        tests:
    -          - unique
    -          - not_null
    diff --git a/test/integration/069_build_tests/seeds/countries.csv b/test/integration/069_build_tests/seeds/countries.csv
    deleted file mode 100644
    index 82db396fd6f..00000000000
    --- a/test/integration/069_build_tests/seeds/countries.csv
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -"iso3","name","iso2","iso_numeric","cow_alpha","cow_numeric","fao_code","un_code","wb_code","imf_code","fips","geonames_name","geonames_id","r_name","aiddata_name","aiddata_code","oecd_name","oecd_code","historical_name","historical_iso3","historical_iso2","historical_iso_numeric"
    -"ABW","Aruba","AW","533","","","","533","ABW","314","AA","Aruba","3577279","ARUBA","Aruba","12","Aruba","373","","","",""
    -"AFG","Afghanistan","AF","4","AFG","700","2","4","AFG","512","AF","Afghanistan","1149361","AFGHANISTAN","Afghanistan","1","Afghanistan","625","","","",""
    -"AGO","Angola","AO","24","ANG","540","7","24","AGO","614","AO","Angola","3351879","ANGOLA","Angola","7","Angola","225","","","",""
    -"AIA","Anguilla","AI","660","","","","660","AIA","312","AV","Anguilla","3573511","ANGUILLA","Anguilla","8","Anguilla","376","","","",""
    -"ALA","Aland Islands","AX","248","","","","248","ALA","","","Aland Islands","661882","ALAND ISLANDS","","","","","","","",""
    -"ALB","Albania","AL","8","ALB","339","3","8","ALB","914","AL","Albania","783754","ALBANIA","Albania","3","Albania","71","","","",""
    -"AND","Andorra","AD","20","AND","232","6","20","ADO","","AN","Andorra","3041565","ANDORRA","","","","","","","",""
    -"ANT","Netherlands Antilles","AN","530","","","","","ANT","353","NT","Netherlands Antilles","","NETHERLANDS ANTILLES","Netherlands Antilles","211","Netherlands Antilles","361","Netherlands Antilles","ANT","AN","530"
    -"ARE","United Arab Emirates","AE","784","UAE","696","225","784","ARE","466","AE","United Arab Emirates","290557","UNITED ARAB EMIRATES","United Arab Emirates","140","United Arab Emirates","576","","","",""
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/snapshots/snap_0.sql b/test/integration/069_build_tests/snapshots/snap_0.sql
    deleted file mode 100644
    index 03e8e491f21..00000000000
    --- a/test/integration/069_build_tests/snapshots/snap_0.sql
    +++ /dev/null
    @@ -1,16 +0,0 @@
    -{% snapshot snap_0 %}
    -
    -{{
    -    config(
    -      target_database=database,
    -      target_schema=schema,
    -      unique_key='iso3',
    -
    -      strategy='timestamp',
    -      updated_at='snap_0_updated_at',
    -    )
    -}}
    -
    -select *, current_timestamp as snap_0_updated_at from {{ ref('model_0') }}
    -
    -{% endsnapshot %}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/snapshots/snap_1.sql b/test/integration/069_build_tests/snapshots/snap_1.sql
    deleted file mode 100644
    index 90455ed4625..00000000000
    --- a/test/integration/069_build_tests/snapshots/snap_1.sql
    +++ /dev/null
    @@ -1,39 +0,0 @@
    -{% snapshot snap_1 %}
    -
    -{{
    -    config(
    -      target_database=database,
    -      target_schema=schema,
    -      unique_key='iso3',
    -
    -      strategy='timestamp',
    -      updated_at='snap_1_updated_at',
    -    )
    -}}
    -
-SELECT
-  iso3,
-  "name",
-  iso2,
-  iso_numeric,
-  cow_alpha,
-  cow_numeric,
-  fao_code,
-  un_code,
-  wb_code,
-  imf_code,
-  fips,
-  geonames_name,
-  geonames_id,
-  r_name,
-  aiddata_name,
-  aiddata_code,
-  oecd_name,
-  oecd_code,
-  historical_name,
-  historical_iso3,
-  historical_iso2,
    -  historical_iso_numeric,
    -  current_timestamp as snap_1_updated_at from {{ ref('model_1') }}
    -
    -{% endsnapshot %}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/snapshots/snap_99.sql b/test/integration/069_build_tests/snapshots/snap_99.sql
    deleted file mode 100644
    index 5288dbbb805..00000000000
    --- a/test/integration/069_build_tests/snapshots/snap_99.sql
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -{% snapshot snap_99 %}
    -
    -{{
    -    config(
    -      target_database=database,
    -      target_schema=schema,
    -      strategy='timestamp',
    -      unique_key='num',
    -      updated_at='snap_99_updated_at',
    -    )
    -}}
    -
    -select *, current_timestamp as snap_99_updated_at from {{ ref('model_99') }}
    -
    -{% endsnapshot %}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/test-files/model_b.sql b/test/integration/069_build_tests/test-files/model_b.sql
    deleted file mode 100644
    index 24cb03c7e01..00000000000
    --- a/test/integration/069_build_tests/test-files/model_b.sql
    +++ /dev/null
    @@ -1 +0,0 @@
-select * from {{ ref('model_a') }}
    diff --git a/test/integration/069_build_tests/test-files/model_b_null.sql b/test/integration/069_build_tests/test-files/model_b_null.sql
    deleted file mode 100644
    index 4e5224ddf72..00000000000
    --- a/test/integration/069_build_tests/test-files/model_b_null.sql
    +++ /dev/null
    @@ -1 +0,0 @@
-select null from {{ ref('model_a') }}
    diff --git a/test/integration/069_build_tests/test_build.py b/test/integration/069_build_tests/test_build.py
    deleted file mode 100644
    index 628367082e7..00000000000
    --- a/test/integration/069_build_tests/test_build.py
    +++ /dev/null
    @@ -1,143 +0,0 @@
    -from test.integration.base import DBTIntegrationTest, use_profile, normalize
    -import yaml
    -import shutil
    -import os
    -
    -
    -class TestBuildBase(DBTIntegrationTest):
    -    @property
    -    def schema(self):
    -        return "build_test_069"
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            "config-version": 2,
    -            "snapshot-paths": ["snapshots"],
    -            "seed-paths": ["seeds"],
    -            "seeds": {
    -                "quote_columns": False,
    -            },
    -        }
    -
    -    def build(self, expect_pass=True, extra_args=None, **kwargs):
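-        # helper: run `dbt build`; kwargs (if any) are YAML-serialized onto --args,
-        # and extra_args are appended verbatim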
    -        args = ["build"]
    -        if kwargs:
    -            args.extend(("--args", yaml.safe_dump(kwargs)))
    -        if extra_args:
    -            args.extend(extra_args)
    -
    -        return self.run_dbt(args, expect_pass=expect_pass)
    -
    -
    -class TestPassingBuild(TestBuildBase):
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @use_profile("postgres")
    -    def test__postgres_build_happy_path(self):
    -        self.build()
    -
    -
    -class TestFailingBuild(TestBuildBase):
    -    @property
    -    def models(self):
    -        return "models-failing"
    -
    -    @use_profile("postgres")
-    def test__postgres_build_failing_path(self):
    -        results = self.build(expect_pass=False)
    -        self.assertEqual(len(results), 13)
    -        actual = [r.status for r in results]
-        expected = ['error'] + ['skipped']*5 + ['pass']*2 + ['success']*5
    -        self.assertEqual(sorted(actual), sorted(expected))
    -
    -
    -class TestFailingTestsBuild(TestBuildBase):
    -    @property
    -    def models(self):
    -        return "tests-failing"
    -
    -    @use_profile("postgres")
    -    def test__postgres_failing_test_skips_downstream(self):
    -        results = self.build(expect_pass=False)
    -        self.assertEqual(len(results), 13)
    -        actual = [str(r.status) for r in results]
    -        expected = ['fail'] + ['skipped']*6 + ['pass']*2 + ['success']*4
    -        self.assertEqual(sorted(actual), sorted(expected))
    -
    -
    -class TestCircularRelationshipTestsBuild(TestBuildBase):
    -    @property
    -    def models(self):
    -        return "models-circular-relationship"
    -
    -    @use_profile("postgres")
    -    def test__postgres_circular_relationship_test_success(self):
    -        """ Ensure that tests that refer to each other's model don't create
    -        a circular dependency. """
    -        results = self.build()
    -        actual = [r.status for r in results]
    -        expected = ['success']*7 + ['pass']*2
    -        self.assertEqual(sorted(actual), sorted(expected))
    -
    -
    -class TestSimpleBlockingTest(TestBuildBase):
    -    @property
    -    def models(self):
    -        return "models-simple-blocking"
    -        
    -    @property
    -    def project_config(self):
    -        return {
    -            "config-version": 2,
    -            "snapshot-paths": ["does-not-exist"],
    -            "seed-paths": ["does-not-exist"],
    -        }
    -
    -    @use_profile("postgres")
    -    def test__postgres_simple_blocking_test(self):
    -        """ Ensure that a failed test on model_a always blocks model_b """
    -        results = self.build(expect_pass=False)
    -        actual = [r.status for r in results]
    -        expected = ['success', 'fail', 'skipped']
    -        self.assertEqual(sorted(actual), sorted(expected))
    -
    -
    -class TestInterdependentModels(TestBuildBase):
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            "config-version": 2,
    -            "snapshot-paths": ["snapshots-none"],
    -            "seeds": {
    -                "quote_columns": False,
    -            },
    -        }
    -
    -    @property
    -    def models(self):
    -        return "models-interdependent"
    -
    -    def tearDown(self):
    -        if os.path.exists(normalize('models-interdependent/model_b.sql')):
    -            os.remove(normalize('models-interdependent/model_b.sql'))
    -
    -
    -    @use_profile("postgres")
    -    def test__postgres_interdependent_models(self):
    -        # check that basic build works
    -        shutil.copyfile('test-files/model_b.sql', 'models-interdependent/model_b.sql')
    -        results = self.build()
    -        self.assertEqual(len(results), 16)
    -
    -        # return null from model_b
    -        shutil.copyfile('test-files/model_b_null.sql', 'models-interdependent/model_b.sql')
    -        results = self.build(expect_pass=False)
    -        self.assertEqual(len(results), 16)
    -        actual = [str(r.status) for r in results]
    -        expected = ['error']*4 + ['skipped']*7 + ['pass']*2 + ['success']*3
    -        self.assertEqual(sorted(actual), sorted(expected))
    -
    diff --git a/test/integration/069_build_tests/tests-failing/model_0.sql b/test/integration/069_build_tests/tests-failing/model_0.sql
    deleted file mode 100644
    index 2fe54b32418..00000000000
    --- a/test/integration/069_build_tests/tests-failing/model_0.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('countries') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/tests-failing/model_1.sql b/test/integration/069_build_tests/tests-failing/model_1.sql
    deleted file mode 100644
    index 058c968c760..00000000000
    --- a/test/integration/069_build_tests/tests-failing/model_1.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('snap_0') }}
    diff --git a/test/integration/069_build_tests/tests-failing/model_2.sql b/test/integration/069_build_tests/tests-failing/model_2.sql
    deleted file mode 100644
    index 25bea5224cf..00000000000
    --- a/test/integration/069_build_tests/tests-failing/model_2.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select * from {{ ref('snap_1') }}
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/tests-failing/model_99.sql b/test/integration/069_build_tests/tests-failing/model_99.sql
    deleted file mode 100644
    index 38c103e823b..00000000000
    --- a/test/integration/069_build_tests/tests-failing/model_99.sql
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -{{ config(materialized='table') }}
    -
    -select '1' as "num"
    \ No newline at end of file
    diff --git a/test/integration/069_build_tests/tests-failing/test.yml b/test/integration/069_build_tests/tests-failing/test.yml
    deleted file mode 100644
    index c6dbe0e971f..00000000000
    --- a/test/integration/069_build_tests/tests-failing/test.yml
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model_0
    -    columns:
    -      - name: iso3
    -        tests:
    -          - unique
    -          - not_null
    -      - name: historical_iso_numeric
    -        tests:
    -          - not_null
    -  - name: model_2
    -    columns:
    -      - name: iso3
    -        tests:
    -          - unique
    -          - not_null
    diff --git a/test/integration/base.py b/test/integration/base.py
    index 602be18525c..f623501fbee 100644
    --- a/test/integration/base.py
    +++ b/test/integration/base.py
    @@ -313,7 +313,7 @@ def setUp(self):
             os.chdir(self.initial_dir)
             # before we go anywhere, collect the initial path info
             self._logs_dir = os.path.join(self.initial_dir, 'logs', self.prefix)
    -        setup_event_logger(self._logs_dir, '', False, True)
    +        setup_event_logger(self._logs_dir)
             _really_makedirs(self._logs_dir)
             self.test_original_source_path = _pytest_get_test_root()
             self.test_root_dir = self._generate_test_root_dir()
    diff --git a/test/unit/test_adapter_connection_manager.py b/test/unit/test_adapter_connection_manager.py
    index 47db6b67ab0..b270f6a5d19 100644
    --- a/test/unit/test_adapter_connection_manager.py
    +++ b/test/unit/test_adapter_connection_manager.py
    @@ -64,7 +64,7 @@ def test_retry_connection_fails_unhandled(self):
             * The Connection state should be "fail" and the handle None.
             * The resulting attempt count should be 1 as we are not explicitly configured to handle a
               ValueError.
    -        * retry_connection should raise a FailedToConnectException with the Exception message.
    +        * retry_connection should raise a FailedToConnectError with the Exception message.
             """
             conn = self.postgres_connection
             attempts = 0
    @@ -75,7 +75,7 @@ def connect():
                 raise ValueError("Something went horribly wrong")
     
             with self.assertRaisesRegex(
    -            dbt.exceptions.FailedToConnectException, "Something went horribly wrong"
    +            dbt.exceptions.FailedToConnectError, "Something went horribly wrong"
             ):
     
                 BaseConnectionManager.retry_connection(
    @@ -99,7 +99,7 @@ def test_retry_connection_fails_handled(self):
             As a result:
             * The Connection state should be "fail" and the handle None.
             * The resulting attempt count should be 2 as we are configured to handle a ValueError.
    -        * retry_connection should raise a FailedToConnectException with the Exception message.
    +        * retry_connection should raise a FailedToConnectError with the Exception message.
             """
             conn = self.postgres_connection
             attempts = 0
    @@ -110,7 +110,7 @@ def connect():
                 raise ValueError("Something went horribly wrong")
     
             with self.assertRaisesRegex(
    -            dbt.exceptions.FailedToConnectException, "Something went horribly wrong"
    +            dbt.exceptions.FailedToConnectError, "Something went horribly wrong"
             ):
     
                 BaseConnectionManager.retry_connection(
    @@ -173,7 +173,7 @@ def test_retry_connection_attempts(self):
             * The Connection state should be "fail" and the handle None, as connect
               never returns.
             * The resulting attempt count should be 11 as we are configured to handle a ValueError.
    -        * retry_connection should raise a FailedToConnectException with the Exception message.
    +        * retry_connection should raise a FailedToConnectError with the Exception message.
             """
             conn = self.postgres_connection
             attempts = 0
    @@ -185,7 +185,7 @@ def connect():
                 raise ValueError("Something went horribly wrong")
     
             with self.assertRaisesRegex(
    -            dbt.exceptions.FailedToConnectException, "Something went horribly wrong"
    +            dbt.exceptions.FailedToConnectError, "Something went horribly wrong"
             ):
                 BaseConnectionManager.retry_connection(
                     conn,
    @@ -208,7 +208,7 @@ def test_retry_connection_fails_handling_all_exceptions(self):
             * The Connection state should be "fail" and the handle None, as connect
               never returns.
             * The resulting attempt count should be 11 as we are configured to handle all Exceptions.
    -        * retry_connection should raise a FailedToConnectException with the Exception message.
    +        * retry_connection should raise a FailedToConnectError with the Exception message.
             """
             conn = self.postgres_connection
             attempts = 0
    @@ -220,7 +220,7 @@ def connect():
                 raise TypeError("An unhandled thing went horribly wrong")
     
             with self.assertRaisesRegex(
    -            dbt.exceptions.FailedToConnectException, "An unhandled thing went horribly wrong"
    +            dbt.exceptions.FailedToConnectError, "An unhandled thing went horribly wrong"
             ):
                 BaseConnectionManager.retry_connection(
                     conn,
    @@ -338,7 +338,7 @@ def connect():
                 return True
     
             with self.assertRaisesRegex(
    -            dbt.exceptions.FailedToConnectException, "retry_limit cannot be negative"
    +            dbt.exceptions.FailedToConnectError, "retry_limit cannot be negative"
             ):
                 BaseConnectionManager.retry_connection(
                     conn,
    @@ -365,7 +365,7 @@ def connect():
     
             for retry_timeout in [-10, -2.5, lambda _: -100, lambda _: -10.1]:
                 with self.assertRaisesRegex(
    -                dbt.exceptions.FailedToConnectException,
    +                dbt.exceptions.FailedToConnectError,
                     "retry_timeout cannot be negative or return a negative time",
                 ):
                     BaseConnectionManager.retry_connection(
    @@ -392,7 +392,7 @@ def connect():
                 return True
     
             with self.assertRaisesRegex(
    -            dbt.exceptions.FailedToConnectException,
    +            dbt.exceptions.FailedToConnectError,
                 "retry_limit cannot be negative",
             ):
                 BaseConnectionManager.retry_connection(
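
The renamed FailedToConnectError assertions above pin down a fairly complete retry contract: exception types outside the handled list fail after a single attempt, handled types are retried up to retry_limit additional times, and negative retry_limit or retry_timeout values are rejected with the quoted messages. As a minimal sketch of that contract (an approximation inferred from these tests, not dbt's actual BaseConnectionManager.retry_connection):

    import time
    from typing import Callable, Iterable, Type, Union


    class FailedToConnectError(Exception):
        """Stand-in for dbt.exceptions.FailedToConnectError."""


    def retry_connection(
        connect: Callable[[], object],
        retryable_exceptions: Iterable[Type[Exception]] = (),
        retry_limit: int = 1,
        retry_timeout: Union[float, Callable[[int], float]] = 1.0,
    ):
        if retry_limit < 0:
            raise FailedToConnectError("retry_limit cannot be negative")
        handled = tuple(retryable_exceptions)
        attempt = 0
        while True:
            attempt += 1
            try:
                return connect()
            except handled as e:
                # Handled types retry: with retry_limit=10, connect() runs
                # 11 times in total before the error is raised.
                if attempt > retry_limit:
                    raise FailedToConnectError(str(e)) from e
                timeout = retry_timeout(attempt) if callable(retry_timeout) else retry_timeout
                if timeout < 0:
                    raise FailedToConnectError(
                        "retry_timeout cannot be negative or return a negative time"
                    )
                time.sleep(timeout)
            except Exception as e:
                # Types outside retryable_exceptions fail on the first attempt.
                raise FailedToConnectError(str(e)) from e

Unlike this sketch, the real implementation can validate the timeout up front; checking it lazily on the first retry is just enough to reproduce the tested error message.
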
    diff --git a/test/unit/test_cache.py b/test/unit/test_cache.py
    index f69b4783ee1..3f9c6e4f6bf 100644
    --- a/test/unit/test_cache.py
    +++ b/test/unit/test_cache.py
    @@ -121,7 +121,7 @@ def test_dest_exists_error(self):
             self.cache.add(bar)
             self.assert_relations_exist('DBT', 'schema', 'foo', 'bar')
     
    -        with self.assertRaises(dbt.exceptions.InternalException):
    +        with self.assertRaises(dbt.exceptions.DbtInternalError):
                 self.cache.rename(foo, bar)
     
             self.assert_relations_exist('DBT', 'schema', 'foo', 'bar')
    diff --git a/test/unit/test_config.py b/test/unit/test_config.py
    index d45ee86587d..b869ffcf243 100644
    --- a/test/unit/test_config.py
    +++ b/test/unit/test_config.py
    @@ -800,7 +800,7 @@ def test_no_project(self):
             with self.assertRaises(dbt.exceptions.DbtProjectError) as exc:
                 dbt.config.Project.from_project_root(self.project_dir, renderer)
     
    -        self.assertIn('no dbt_project.yml', str(exc.exception))
    +        self.assertIn('No dbt_project.yml', str(exc.exception))
     
         def test_invalid_version(self):
             self.default_project_data['require-dbt-version'] = 'hello!'
    @@ -929,14 +929,14 @@ def tearDown(self):
         def test_configured_task_dir_change(self):
             self.assertEqual(os.getcwd(), INITIAL_ROOT)
             self.assertNotEqual(INITIAL_ROOT, self.project_dir)
    -        new_task = InheritsFromConfiguredTask.from_args(self.args)
    +        InheritsFromConfiguredTask.from_args(self.args)
             self.assertEqual(os.path.realpath(os.getcwd()),
                              os.path.realpath(self.project_dir))
     
         def test_configured_task_dir_change_with_bad_path(self):
             self.args.project_dir = 'bad_path'
    -        with self.assertRaises(dbt.exceptions.RuntimeException):
    -            new_task = InheritsFromConfiguredTask.from_args(self.args)
    +        with self.assertRaises(dbt.exceptions.DbtRuntimeError):
    +            InheritsFromConfiguredTask.from_args(self.args)
     
     
     class TestVariableProjectFile(BaseFileTest):
    @@ -1157,8 +1157,8 @@ def test__warn_for_unused_resource_config_paths(self):
                 project.warn_for_unused_resource_config_paths(self.used, [])
                 warn_or_error_patch.assert_called_once()
                 event = warn_or_error_patch.call_args[0][0]
    -            assert event.info.name == 'UnusedResourceConfigPath'
    -            msg = event.info.msg
    +            assert type(event).__name__ == 'UnusedResourceConfigPath'
    +            msg = event.message()
                 expected_msg = "- models.my_test_project.baz"
                 assert expected_msg in msg
     
    diff --git a/test/unit/test_context.py b/test/unit/test_context.py
    index a567e032f55..34c8562402f 100644
    --- a/test/unit/test_context.py
    +++ b/test/unit/test_context.py
    @@ -89,7 +89,7 @@ def test_var_not_defined(self):
             var = providers.RuntimeVar(self.context, self.config, self.model)
     
             self.assertEqual(var("foo", "bar"), "bar")
    -        with self.assertRaises(dbt.exceptions.CompilationException):
    +        with self.assertRaises(dbt.exceptions.CompilationError):
                 var("foo")
     
         def test_parser_var_default_something(self):
    @@ -464,7 +464,7 @@ def test_macro_namespace_duplicates(config_postgres, manifest_fx):
         mn.add_macros(manifest_fx.macros.values(), {})
     
         # same pkg, same name: error
    -    with pytest.raises(dbt.exceptions.CompilationException):
    +    with pytest.raises(dbt.exceptions.CompilationError):
             mn.add_macro(mock_macro("macro_a", "root"), {})
     
         # different pkg, same name: no error
    diff --git a/test/unit/test_core_dbt_utils.py b/test/unit/test_core_dbt_utils.py
    index 1deb8a77552..546e4f6ca00 100644
    --- a/test/unit/test_core_dbt_utils.py
    +++ b/test/unit/test_core_dbt_utils.py
    @@ -2,7 +2,7 @@
     import tarfile
     import unittest
     
    -from dbt.exceptions import ConnectionException
    +from dbt.exceptions import ConnectionError
     from dbt.utils import _connection_exception_retry as connection_exception_retry
     
     
    @@ -19,7 +19,7 @@ def test_connection_exception_retry_success_requests_exception(self):
     
         def test_connection_exception_retry_max(self):
             Counter._reset()
    -        with self.assertRaises(ConnectionException):
    +        with self.assertRaises(ConnectionError):
                 connection_exception_retry(lambda: Counter._add_with_exception(), 5)
             self.assertEqual(6, counter) # 6 = original attempt plus 5 retries
     
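The renamed ConnectionError here guards dbt's download retries, and the "6 = original attempt plus 5 retries" comment pins the expected call count. A tiny sketch of that counting behavior (an approximation of dbt.utils._connection_exception_retry; the requests-specific exception handling is omitted, and the class shadows the builtin ConnectionError just as dbt's does):

    class ConnectionError(Exception):
        """Stand-in for dbt.exceptions.ConnectionError."""


    def connection_exception_retry(fn, max_attempts, attempt=0):
        # Recursive retry: fn runs once, then up to max_attempts more times,
        # so a function that always fails is called max_attempts + 1 times.
        try:
            return fn()
        except Exception:
            if attempt < max_attempts:
                return connection_exception_retry(fn, max_attempts, attempt + 1)
            raise ConnectionError("External connection exception occurred")
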
    diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py
    index c758e53bda9..61bab359767 100644
    --- a/test/unit/test_deps.py
    +++ b/test/unit/test_deps.py
    @@ -133,7 +133,7 @@ def test_resolve_fail(self):
             self.assertEqual(c.git, 'http://example.com')
             self.assertEqual(c.revisions, ['0.0.1', '0.0.2'])
     
    -        with self.assertRaises(dbt.exceptions.DependencyException):
    +        with self.assertRaises(dbt.exceptions.DependencyError):
                 c.resolved()
     
         def test_default_revision(self):
    @@ -264,7 +264,7 @@ def test_resolve_missing_package(self):
                 package='dbt-labs-test/b',
                 version='0.1.2'
             ))
    -        with self.assertRaises(dbt.exceptions.DependencyException) as exc:
    +        with self.assertRaises(dbt.exceptions.DependencyError) as exc:
                 a.resolved()
     
             msg = 'Package dbt-labs-test/b was not found in the package index'
    @@ -276,7 +276,7 @@ def test_resolve_missing_version(self):
                 version='0.1.4'
             ))
     
    -        with self.assertRaises(dbt.exceptions.DependencyException) as exc:
    +        with self.assertRaises(dbt.exceptions.DependencyError) as exc:
                 a.resolved()
             msg = (
                 "Could not find a matching compatible version for package "
    @@ -298,7 +298,7 @@ def test_resolve_conflict(self):
             b = RegistryUnpinnedPackage.from_contract(b_contract)
             c = a.incorporate(b)
     
    -        with self.assertRaises(dbt.exceptions.DependencyException) as exc:
    +        with self.assertRaises(dbt.exceptions.DependencyError) as exc:
                 c.resolved()
             msg = (
                 "Version error for package dbt-labs-test/a: Could not "
    diff --git a/test/unit/test_exceptions.py b/test/unit/test_exceptions.py
    index 6a47255e13c..e66e913b1a6 100644
    --- a/test/unit/test_exceptions.py
    +++ b/test/unit/test_exceptions.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import raise_duplicate_macro_name, CompilationException
    +from dbt.exceptions import raise_duplicate_macro_name, CompilationError
     from .utils import MockMacro
     
     
    @@ -8,7 +8,7 @@ def test_raise_duplicate_macros_different_package():
         macro_1 = MockMacro(package='dbt', name='some_macro')
         macro_2 = MockMacro(package='dbt-myadapter', name='some_macro')
     
    -    with pytest.raises(CompilationException) as exc:
    +    with pytest.raises(CompilationError) as exc:
             raise_duplicate_macro_name(
                 node_1=macro_1,
                 node_2=macro_2,
    @@ -24,7 +24,7 @@ def test_raise_duplicate_macros_same_package():
         macro_1 = MockMacro(package='dbt', name='some_macro')
         macro_2 = MockMacro(package='dbt', name='some_macro')
     
    -    with pytest.raises(CompilationException) as exc:
    +    with pytest.raises(CompilationError) as exc:
             raise_duplicate_macro_name(
                 node_1=macro_1,
                 node_2=macro_2,
    diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py
    index 4be866338a2..6f03ec22e92 100644
    --- a/test/unit/test_flags.py
    +++ b/test/unit/test_flags.py
    @@ -1,8 +1,8 @@
     import os
    -from unittest import mock, TestCase
    +from unittest import TestCase
     from argparse import Namespace
    +import pytest
     
    -from .utils import normalize
     from dbt import flags
     from dbt.contracts.project import UserConfig
     from dbt.graph.selector_spec import IndirectSelection
    @@ -63,6 +63,21 @@ def test__flags(self):
             flags.WARN_ERROR = False
             self.user_config.warn_error = None
     
    +        # warn_error_options
    +        self.user_config.warn_error_options = '{"include": "all"}'
    +        flags.set_from_args(self.args, self.user_config)
    +        self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}')
    +        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
    +        flags.set_from_args(self.args, self.user_config)
    +        self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": []}')
    +        setattr(self.args, 'warn_error_options', '{"include": "all"}')
    +        flags.set_from_args(self.args, self.user_config)
    +        self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}')
    +        # cleanup
    +        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
    +        delattr(self.args, 'warn_error_options')
    +        self.user_config.warn_error_options = None
    +    
             # write_json
             self.user_config.write_json = True
             flags.set_from_args(self.args, self.user_config)
    @@ -206,6 +221,9 @@ def test__flags(self):
             self.user_config.indirect_selection = 'cautious'
             flags.set_from_args(self.args, self.user_config)
             self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Cautious)
    +        self.user_config.indirect_selection = 'buildable'
    +        flags.set_from_args(self.args, self.user_config)
    +        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Buildable)
             self.user_config.indirect_selection = None
             flags.set_from_args(self.args, self.user_config)
             self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Eager)
    @@ -261,3 +279,59 @@ def test__flags(self):
             # cleanup
             os.environ.pop('DBT_LOG_PATH')
             delattr(self.args, 'log_path')
    +
    +    def test__flags_are_mutually_exclusive(self):
    +        # options from user config
    +        self.user_config.warn_error = False
+        self.user_config.warn_error_options = '{"include": "all"}'
    +        with pytest.raises(ValueError):
    +            flags.set_from_args(self.args, self.user_config)
+        # cleanup
    +        self.user_config.warn_error = None
    +        self.user_config.warn_error_options = None
    +        
    +        # options from args
    +        setattr(self.args, 'warn_error', False)
+        setattr(self.args, 'warn_error_options', '{"include": "all"}')
    +        with pytest.raises(ValueError):
    +            flags.set_from_args(self.args, self.user_config)
    +        # cleanup
    +        delattr(self.args, 'warn_error')
    +        delattr(self.args, 'warn_error_options')
    +
    +        # options from environment
    +        os.environ['DBT_WARN_ERROR'] = 'false'
    +        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
    +        with pytest.raises(ValueError):
    +            flags.set_from_args(self.args, self.user_config)
+        # cleanup
    +        os.environ.pop('DBT_WARN_ERROR')
    +        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
    +
    +        # options from user config + args
    +        self.user_config.warn_error = False
+        setattr(self.args, 'warn_error_options', '{"include": "all"}')
    +        with pytest.raises(ValueError):
    +            flags.set_from_args(self.args, self.user_config)
    +        # cleanup
    +        self.user_config.warn_error = None
    +        delattr(self.args, 'warn_error_options')
    +        
    +        # options from user config + environ
    +        self.user_config.warn_error = False
    +        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
    +        with pytest.raises(ValueError):
    +            flags.set_from_args(self.args, self.user_config)
    +        # cleanup
    +        self.user_config.warn_error = None
    +        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
    +
    +        # options from args + environ
    +        setattr(self.args, 'warn_error', False)
    +        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
    +        with pytest.raises(ValueError):
    +            flags.set_from_args(self.args, self.user_config)
    +        # cleanup
    +        delattr(self.args, 'warn_error')
    +        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
    +
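
test__flags_are_mutually_exclusive covers every pairing of sources: user config, CLI args, and the DBT_WARN_ERROR* environment variables. One plausible shape for the check inside flags.set_from_args (an assumption for illustration; the real resolution logic also handles precedence and type coercion):

    import os


    def _first_set(*candidates):
        # Return the first value that was explicitly provided, else None.
        for value in candidates:
            if value is not None:
                return value
        return None


    def check_mutually_exclusive(args, user_config):
        warn_error = _first_set(
            getattr(args, "warn_error", None),
            os.environ.get("DBT_WARN_ERROR"),
            getattr(user_config, "warn_error", None),
        )
        warn_error_options = _first_set(
            getattr(args, "warn_error_options", None),
            os.environ.get("DBT_WARN_ERROR_OPTIONS"),
            getattr(user_config, "warn_error_options", None),
        )
        # Setting both is an error regardless of which source supplied which flag.
        if warn_error is not None and warn_error_options is not None:
            raise ValueError(
                "Only one of --warn-error and --warn-error-options can be set"
            )
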
    diff --git a/test/unit/test_graph.py b/test/unit/test_graph.py
    index 90c0141d00a..36d2b9e69cd 100644
    --- a/test/unit/test_graph.py
    +++ b/test/unit/test_graph.py
    @@ -59,6 +59,7 @@ def setUp(self):
     
             # Create file filesystem searcher
             self.filesystem_search = patch('dbt.parser.read_files.filesystem_search')
    +
             def mock_filesystem_search(project, relative_dirs, extension, ignore_spec):
                 if 'sql' not in extension:
                     return []
    @@ -72,6 +73,7 @@ def mock_filesystem_search(project, relative_dirs, extension, ignore_spec):
             self.hook_patcher = patch.object(
                 dbt.parser.hooks.HookParser, '__new__'
             )
    +
             def create_hook_patcher(cls, project, manifest, root_project):
                 result = MagicMock(project=project, manifest=manifest, root_project=root_project)
                 result.__iter__.side_effect = lambda: iter([])
    @@ -82,7 +84,6 @@ def create_hook_patcher(cls, project, manifest, root_project):
             # Create the Manifest.state_check patcher
             @patch('dbt.parser.manifest.ManifestLoader.build_manifest_state_check')
             def _mock_state_check(self):
    -            config = self.root_project
                 all_projects = self.all_projects
                 return ManifestStateCheck(
                     project_env_vars_hash=FileHash.from_contents(''),
    @@ -98,6 +99,7 @@ def _mock_state_check(self):
             # Create the source file patcher
             self.load_source_file_patcher = patch('dbt.parser.read_files.load_source_file')
             self.mock_source_file = self.load_source_file_patcher.start()
    +
             def mock_load_source_file(path, parse_file_type, project_name, saved_files):
                 for sf in self.mock_models:
                     if sf.path == path:
    @@ -117,7 +119,6 @@ def _mock_hook_path(self):
                 )
                 return path
     
    -
         def get_config(self, extra_cfg=None):
             if extra_cfg is None:
                 extra_cfg = {}
    @@ -224,8 +225,6 @@ def test__model_materializations(self):
     
             config = self.get_config(cfg)
             manifest = self.load_manifest(config)
    -        compiler = self.get_compiler(config)
    -        linker = compiler.compile(manifest)
     
             expected_materialization = {
                 "model_one": "table",
    diff --git a/test/unit/test_graph_selection.py b/test/unit/test_graph_selection.py
    index e45fb4de15a..f702aede16b 100644
    --- a/test/unit/test_graph_selection.py
    +++ b/test/unit/test_graph_selection.py
    @@ -205,5 +205,5 @@ def test_parse_specs(spec, parents, parents_depth, children, children_depth, fil
     
     @pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k))
     def test_invalid_specs(invalid):
    -    with pytest.raises(dbt.exceptions.RuntimeException):
    +    with pytest.raises(dbt.exceptions.DbtRuntimeError):
             graph_selector.SelectionCriteria.from_single_spec(invalid)
    diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py
    index 0497d5da02a..769199e841f 100644
    --- a/test/unit/test_graph_selector_methods.py
    +++ b/test/unit/test_graph_selector_methods.py
    @@ -898,11 +898,11 @@ def test_select_state_no_change(manifest, previous_state):
     def test_select_state_nothing(manifest, previous_state):
         previous_state.manifest = None
         method = statemethod(manifest, previous_state)
    -    with pytest.raises(dbt.exceptions.RuntimeException) as exc:
    +    with pytest.raises(dbt.exceptions.DbtRuntimeError) as exc:
             search_manifest_using_method(manifest, method, 'modified')
         assert 'no comparison manifest' in str(exc.value)
     
    -    with pytest.raises(dbt.exceptions.RuntimeException) as exc:
    +    with pytest.raises(dbt.exceptions.DbtRuntimeError) as exc:
             search_manifest_using_method(manifest, method, 'new')
         assert 'no comparison manifest' in str(exc.value)
     
    @@ -973,8 +973,8 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat
             assert not search_manifest_using_method(manifest, method, 'modified')
             warn_or_error_patch.assert_called_once()
             event = warn_or_error_patch.call_args[0][0]
    -        assert event.info.name == 'SeedExceedsLimitSamePath'
    -        msg = event.info.msg
    +        assert type(event).__name__ == 'SeedExceedsLimitSamePath'
    +        msg = event.message()
             assert msg.startswith('Found a seed (pkg.seed) >1MB in size')
         with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
             assert not search_manifest_using_method(manifest, method, 'new')
    @@ -990,8 +990,8 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state
                 manifest, method, 'modified') == {'seed'}
             warn_or_error_patch.assert_called_once()
             event = warn_or_error_patch.call_args[0][0]
    -        assert event.info.name == 'SeedIncreased'
    -        msg = event.info.msg
    +        assert type(event).__name__ == 'SeedIncreased'
    +        msg = event.message()
             assert msg.startswith('Found a seed (pkg.seed) >1MB in size')
         with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
             assert not search_manifest_using_method(manifest, method, 'new')
    diff --git a/test/unit/test_graph_selector_spec.py b/test/unit/test_graph_selector_spec.py
    index 68c8611ccac..d72325affc2 100644
    --- a/test/unit/test_graph_selector_spec.py
    +++ b/test/unit/test_graph_selector_spec.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     from dbt.graph.selector_spec import (
         SelectionCriteria,
         SelectionIntersection,
    @@ -111,10 +111,10 @@ def test_raw_parse_weird():
     
     
     def test_raw_parse_invalid():
    -    with pytest.raises(RuntimeException):
    +    with pytest.raises(DbtRuntimeError):
             SelectionCriteria.from_single_spec('invalid_method:something')
     
    -    with pytest.raises(RuntimeException):
    +    with pytest.raises(DbtRuntimeError):
             SelectionCriteria.from_single_spec('@foo+')
     
     
    diff --git a/test/unit/test_jinja.py b/test/unit/test_jinja.py
    index 6b8c939de64..5213f8d7d8c 100644
    --- a/test/unit/test_jinja.py
    +++ b/test/unit/test_jinja.py
    @@ -6,7 +6,7 @@
     from dbt.clients.jinja import get_rendered
     from dbt.clients.jinja import get_template
     from dbt.clients.jinja import extract_toplevel_blocks
    -from dbt.exceptions import CompilationException, JinjaRenderingException
    +from dbt.exceptions import CompilationError, JinjaRenderingError
     
     
     @contextmanager
    @@ -55,12 +55,12 @@ def expected_id(arg):
         (
             '''foo: "{{ 'bar' | as_bool }}"''',
             returns('bar'),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ 'bar' | as_number }}"''',
             returns('bar'),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ 'bar' | as_native }}"''',
    @@ -116,7 +116,7 @@ def expected_id(arg):
         (
             '''foo: "{{ 1 | as_bool }}"''',
             returns('1'),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ 1 | as_number }}"''',
    @@ -136,7 +136,7 @@ def expected_id(arg):
         (
             '''foo: "{{ '1' | as_bool }}"''',
             returns('1'),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ '1' | as_number }}"''',
    @@ -171,7 +171,7 @@ def expected_id(arg):
         (
             '''foo: "{{ True | as_number }}"''',
             returns('True'),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ True | as_native }}"''',
    @@ -197,7 +197,7 @@ def expected_id(arg):
         (
             '''foo: "{{ true | as_number }}"''',
             returns("True"),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ true | as_native }}"''',
    @@ -254,7 +254,7 @@ def expected_id(arg):
         (
             '''foo: "{{ True | as_number }}"''',
             returns("True"),
    -        raises(JinjaRenderingException),
    +        raises(JinjaRenderingError),
         ),
         (
             '''foo: "{{ True | as_native }}"''',
    @@ -552,24 +552,24 @@ def test_materialization_parse(self):
         def test_nested_not_ok(self):
             # we don't allow nesting same blocks
             body = '{% myblock a %} {% myblock b %} {% endmyblock %} {% endmyblock %}'
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 extract_toplevel_blocks(body, allowed_blocks={'myblock'})
     
         def test_incomplete_block_failure(self):
             fullbody = '{% myblock foo %} {% endmyblock %}'
             for length in range(len('{% myblock foo %}'), len(fullbody)-1):
                 body = fullbody[:length]
    -            with self.assertRaises(CompilationException):
    +            with self.assertRaises(CompilationError):
                     extract_toplevel_blocks(body, allowed_blocks={'myblock'})
     
         def test_wrong_end_failure(self):
             body = '{% myblock foo %} {% endotherblock %}'
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'})
     
         def test_comment_no_end_failure(self):
             body = '{# '
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 extract_toplevel_blocks(body)
     
         def test_comment_only(self):
    @@ -698,7 +698,7 @@ def test_unclosed_model_quotes(self):
         def test_if(self):
             # if you conditionally define your macros/models, don't
             body = '{% if true %}{% macro my_macro() %} adsf {% endmacro %}{% endif %}'
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 extract_toplevel_blocks(body)
     
         def test_if_innocuous(self):
    @@ -710,7 +710,7 @@ def test_if_innocuous(self):
         def test_for(self):
             # no for-loops over macros.
             body = '{% for x in range(10) %}{% macro my_macro() %} adsf {% endmacro %}{% endfor %}'
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 extract_toplevel_blocks(body)
     
         def test_for_innocuous(self):
    @@ -722,19 +722,19 @@ def test_for_innocuous(self):
     
         def test_endif(self):
             body = '{% snapshot foo %}select * from thing{% endsnapshot%}{% endif %}'
    -        with self.assertRaises(CompilationException) as err:
    +        with self.assertRaises(CompilationError) as err:
                 extract_toplevel_blocks(body)
             self.assertIn('Got an unexpected control flow end tag, got endif but never saw a preceeding if (@ 1:53)', str(err.exception))
     
         def test_if_endfor(self):
             body = '{% if x %}...{% endfor %}{% endif %}'
    -        with self.assertRaises(CompilationException) as err:
    +        with self.assertRaises(CompilationError) as err:
                 extract_toplevel_blocks(body)
             self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 1:13)', str(err.exception))
     
         def test_if_endfor_newlines(self):
             body = '{% if x %}\n    ...\n    {% endfor %}\n{% endif %}'
    -        with self.assertRaises(CompilationException) as err:
    +        with self.assertRaises(CompilationError) as err:
                 extract_toplevel_blocks(body)
             self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 3:4)', str(err.exception))
     
    diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py
    index 19800b7c798..8e79e996ce8 100644
    --- a/test/unit/test_parser.py
    +++ b/test/unit/test_parser.py
    @@ -18,7 +18,7 @@
         ModelNode, Macro, DependsOn, SingularTestNode, SnapshotNode,
         AnalysisNode, UnpatchedSourceDefinition
     )
    -from dbt.exceptions import CompilationException, ParsingException
    +from dbt.exceptions import CompilationError, ParsingError
     from dbt.node_types import NodeType
     from dbt.parser import (
         ModelParser, MacroParser, SingularTestParser, GenericTestParser,
    @@ -664,7 +664,7 @@ def test_basic(self):
     
         def test_sql_model_parse_error(self):
             block = self.file_block_for(sql_model_parse_error, 'nested/model_1.sql')
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 self.parser.parse_file(block)
     
         def test_python_model_parse(self):
    @@ -724,31 +724,31 @@ def test_python_model_config_with_defaults(self):
         def test_python_model_single_argument(self):
             block = self.file_block_for(python_model_single_argument, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_no_argument(self):
             block = self.file_block_for(python_model_no_argument, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_incorrect_argument_name(self):
             block = self.file_block_for(python_model_incorrect_argument_name, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_multiple_models(self):
             block = self.file_block_for(python_model_multiple_models, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_incorrect_function_name(self):
             block = self.file_block_for(python_model_incorrect_function_name, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_empty_file(self):
    @@ -759,13 +759,13 @@ def test_python_model_empty_file(self):
         def test_python_model_multiple_returns(self):
             block = self.file_block_for(python_model_multiple_returns, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_no_return(self):
             block = self.file_block_for(python_model_no_return, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_single_return(self):
    @@ -776,7 +776,7 @@ def test_python_model_single_return(self):
         def test_python_model_incorrect_ref(self):
             block = self.file_block_for(python_model_incorrect_ref, 'nested/py_model.py')
             self.parser.manifest.files[block.file.file_id] = block.file
    -        with self.assertRaises(ParsingException):
    +        with self.assertRaises(ParsingError):
                 self.parser.parse_file(block)
     
         def test_python_model_default_materialization(self):
    @@ -1027,7 +1027,7 @@ def file_block_for(self, data, filename):
         def test_parse_error(self):
             block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}',
                                         'nested/snap_1.sql')
    -        with self.assertRaises(CompilationException):
    +        with self.assertRaises(CompilationError):
                 self.parser.parse_file(block)
     
         def test_single_block(self):
    diff --git a/test/unit/test_postgres_adapter.py b/test/unit/test_postgres_adapter.py
    index 06a2ed7c497..0d56ff9ff63 100644
    --- a/test/unit/test_postgres_adapter.py
    +++ b/test/unit/test_postgres_adapter.py
    @@ -12,7 +12,7 @@
     from dbt.contracts.files import FileHash
     from dbt.contracts.graph.manifest import ManifestStateCheck
     from dbt.clients import agate_helper
    -from dbt.exceptions import ValidationException, DbtConfigError
    +from dbt.exceptions import DbtValidationError, DbtConfigError
     from psycopg2 import extensions as psycopg2_extensions
     from psycopg2 import DatabaseError
     
    @@ -58,8 +58,8 @@ def adapter(self):
         def test_acquire_connection_validations(self, psycopg2):
             try:
                 connection = self.adapter.acquire_connection('dummy')
    -        except ValidationException as e:
    -            self.fail('got ValidationException: {}'.format(str(e)))
    +        except DbtValidationError as e:
    +            self.fail('got DbtValidationError: {}'.format(str(e)))
             except BaseException as e:
                 self.fail('acquiring connection failed with unknown exception: {}'
                           .format(str(e)))
    diff --git a/test/unit/test_registry_get_request_exception.py b/test/unit/test_registry_get_request_exception.py
    index 44033fe0546..3029971cad4 100644
    --- a/test/unit/test_registry_get_request_exception.py
    +++ b/test/unit/test_registry_get_request_exception.py
    @@ -1,9 +1,9 @@
     import unittest
     
    -from dbt.exceptions import ConnectionException
    +from dbt.exceptions import ConnectionError
     from dbt.clients.registry import _get_with_retries
     
     class testRegistryGetRequestException(unittest.TestCase):
         def test_registry_request_error_catching(self):
             # using non routable IP to test connection error logic in the _get_with_retries function
    -        self.assertRaises(ConnectionException, _get_with_retries, '', 'http://0.0.0.0')
    +        self.assertRaises(ConnectionError, _get_with_retries, '', 'http://0.0.0.0')
    diff --git a/test/unit/test_semver.py b/test/unit/test_semver.py
    index eff7603a2f6..b36c403e3a7 100644
    --- a/test/unit/test_semver.py
    +++ b/test/unit/test_semver.py
    @@ -2,7 +2,7 @@
     import itertools
     
     from typing import List
    -from dbt.exceptions import VersionsNotCompatibleException
    +from dbt.exceptions import VersionsNotCompatibleError
     from dbt.semver import VersionSpecifier, UnboundedVersionSpecifier, \
         VersionRange, reduce_versions, versions_compatible, \
         resolve_to_specific_version, filter_installable
    @@ -40,7 +40,7 @@ def assertVersionSetResult(self, inputs, output_range):
     
         def assertInvalidVersionSet(self, inputs):
             for permutation in itertools.permutations(inputs):
    -            with self.assertRaises(VersionsNotCompatibleException):
    +            with self.assertRaises(VersionsNotCompatibleError):
                     reduce_versions(*permutation)
     
         def test__versions_compatible(self):
    diff --git a/tests/adapter/dbt/tests/adapter/__version__.py b/tests/adapter/dbt/tests/adapter/__version__.py
    index 27cfeecd9e8..219c289b1bf 100644
    --- a/tests/adapter/dbt/tests/adapter/__version__.py
    +++ b/tests/adapter/dbt/tests/adapter/__version__.py
    @@ -1 +1 @@
    -version = "1.4.0b1"
    +version = "1.5.0a1"
    diff --git a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py
    index a9f846e2ca4..d9ff6b5b28f 100644
    --- a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py
    +++ b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py
    @@ -50,10 +50,7 @@ def models(self):
     
         @pytest.fixture(scope="class")
         def macros(self):
    -        return {
    -            "cast.sql": MACROS__CAST_SQL,
    -            "expect_value.sql": MACROS__EXPECT_VALUE_SQL
    -        }
    +        return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL}
     
         def test_alias_model_name(self, project):
             results = run_dbt(["run"])
    @@ -71,10 +68,7 @@ def project_config_update(self):
     
         @pytest.fixture(scope="class")
         def macros(self):
    -        return {
    -            "cast.sql": MACROS__CAST_SQL,
    -            "expect_value.sql": MACROS__EXPECT_VALUE_SQL
    -        }
    +        return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL}
     
         @pytest.fixture(scope="class")
         def models(self):
    @@ -100,10 +94,7 @@ def project_config_update(self):
     
         @pytest.fixture(scope="class")
         def macros(self):
    -        return {
    -            "cast.sql": MACROS__CAST_SQL,
    -            "expect_value.sql": MACROS__EXPECT_VALUE_SQL
    -        }
    +        return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL}
     
         @pytest.fixture(scope="class")
         def models(self):
    @@ -130,19 +121,14 @@ def project_config_update(self, unique_schema):
                 "models": {
                     "test": {
                         "alias": "duped_alias",
    -                    "model_b": {
    -                        "schema": unique_schema + "_alt"
    -                    },
    +                    "model_b": {"schema": unique_schema + "_alt"},
                     },
                 },
             }
     
         @pytest.fixture(scope="class")
         def macros(self):
    -        return {
    -            "cast.sql": MACROS__CAST_SQL,
    -            "expect_value.sql": MACROS__EXPECT_VALUE_SQL
    -        }
    +        return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL}
     
         @pytest.fixture(scope="class")
         def models(self):
    diff --git a/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py
    new file mode 100644
    index 00000000000..279152d6985
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py
    @@ -0,0 +1,96 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt, check_relations_equal
    +
    +
    +seeds__seed_csv = """id,first_name,last_name,email,gender,ip_address
    +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168
    +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35
    +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243
    +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175
    +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136
    +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220
    +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64
    +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13
    +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186
    +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100
    +11,Raymond,Kelley,rkelleya@fc2.com,Male,213.65.166.67
    +12,Gerald,Robinson,grobinsonb@disqus.com,Male,72.232.194.193
    +13,Mildred,Martinez,mmartinezc@samsung.com,Female,198.29.112.5
    +14,Dennis,Arnold,darnoldd@google.com,Male,86.96.3.250
    +15,Judy,Gray,jgraye@opensource.org,Female,79.218.162.245
    +16,Theresa,Garza,tgarzaf@epa.gov,Female,21.59.100.54
    +17,Gerald,Robertson,grobertsong@csmonitor.com,Male,131.134.82.96
    +18,Philip,Hernandez,phernandezh@adobe.com,Male,254.196.137.72
    +19,Julia,Gonzalez,jgonzalezi@cam.ac.uk,Female,84.240.227.174
    +20,Andrew,Davis,adavisj@patch.com,Male,9.255.67.25
    +21,Kimberly,Harper,kharperk@foxnews.com,Female,198.208.120.253
    +22,Mark,Martin,mmartinl@marketwatch.com,Male,233.138.182.153
    +23,Cynthia,Ruiz,cruizm@google.fr,Female,18.178.187.201
    +24,Samuel,Carroll,scarrolln@youtu.be,Male,128.113.96.122
    +25,Jennifer,Larson,jlarsono@vinaora.com,Female,98.234.85.95
    +26,Ashley,Perry,aperryp@rakuten.co.jp,Female,247.173.114.52
    +27,Howard,Rodriguez,hrodriguezq@shutterfly.com,Male,231.188.95.26
    +28,Amy,Brooks,abrooksr@theatlantic.com,Female,141.199.174.118
    +29,Louise,Warren,lwarrens@adobe.com,Female,96.105.158.28
    +30,Tina,Watson,twatsont@myspace.com,Female,251.142.118.177
    +31,Janice,Kelley,jkelleyu@creativecommons.org,Female,239.167.34.233
    +32,Terry,Mccoy,tmccoyv@bravesites.com,Male,117.201.183.203
    +33,Jeffrey,Morgan,jmorganw@surveymonkey.com,Male,78.101.78.149
    +34,Louis,Harvey,lharveyx@sina.com.cn,Male,51.50.0.167
    +35,Philip,Miller,pmillery@samsung.com,Male,103.255.222.110
    +36,Willie,Marshall,wmarshallz@ow.ly,Male,149.219.91.68
    +37,Patrick,Lopez,plopez10@redcross.org,Male,250.136.229.89
    +38,Adam,Jenkins,ajenkins11@harvard.edu,Male,7.36.112.81
    +39,Benjamin,Cruz,bcruz12@linkedin.com,Male,32.38.98.15
    +40,Ruby,Hawkins,rhawkins13@gmpg.org,Female,135.171.129.255
    +41,Carlos,Barnes,cbarnes14@a8.net,Male,240.197.85.140
    +42,Ruby,Griffin,rgriffin15@bravesites.com,Female,19.29.135.24
    +43,Sean,Mason,smason16@icq.com,Male,159.219.155.249
    +44,Anthony,Payne,apayne17@utexas.edu,Male,235.168.199.218
    +45,Steve,Cruz,scruz18@pcworld.com,Male,238.201.81.198
    +46,Anthony,Garcia,agarcia19@flavors.me,Male,25.85.10.18
    +47,Doris,Lopez,dlopez1a@sphinn.com,Female,245.218.51.238
    +48,Susan,Nichols,snichols1b@freewebs.com,Female,199.99.9.61
    +49,Wanda,Ferguson,wferguson1c@yahoo.co.jp,Female,236.241.135.21
    +50,Andrea,Pierce,apierce1d@google.co.uk,Female,132.40.10.209
    +"""
    +
    +model_sql = """
    +{{
    +  config(
    +    materialized = "table",
    +    sort = 'first_name',
    +    dist = 'first_name'
    +  )
    +}}
    +
    +select * from {{ this.schema }}.seed
    +"""
    +
    +
    +class BaseTableMaterialization:
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"seed.csv": seeds__seed_csv}
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"materialized.sql": model_sql}
    +
    +    def test_table_materialization_sort_dist_no_op(self, project):
+        # basic table materialization test; sort and dist are not supported by postgres, so the result table should match the input seed
    +
    +        # check seed
    +        results = run_dbt(["seed"])
    +        assert len(results) == 1
    +
    +        # check run
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
    +        check_relations_equal(project.adapter, ["seed", "materialized"])
    +
    +
    +class TestTableMat(BaseTableMaterialization):
    +    pass
    diff --git a/tests/adapter/dbt/tests/adapter/caching/test_caching.py b/tests/adapter/dbt/tests/adapter/caching/test_caching.py
    new file mode 100644
    index 00000000000..9cf02309c4c
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/caching/test_caching.py
    @@ -0,0 +1,103 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +
    +model_sql = """
    +{{
    +    config(
    +        materialized='table'
    +    )
    +}}
    +select 1 as id
    +"""
    +
    +another_schema_model_sql = """
    +{{
    +    config(
    +        materialized='table',
    +        schema='another_schema'
    +    )
    +}}
    +select 1 as id
    +"""
    +
    +
    +class BaseCachingTest:
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "config-version": 2,
    +            "quoting": {
    +                "identifier": False,
    +                "schema": False,
    +            },
    +        }
    +
    +    def run_and_inspect_cache(self, project, run_args=None):
    +        run_dbt(run_args)
    +
    +        # the cache was empty at the start of the run.
+        # the model materialization returned an unquoted relation and added it to the cache.
    +        adapter = project.adapter
    +        assert len(adapter.cache.relations) == 1
    +        relation = list(adapter.cache.relations).pop()
    +        assert relation.schema == project.test_schema
    +        assert relation.schema == project.test_schema.lower()
    +
    +        # on the second run, dbt will find a relation in the database during cache population.
    +        # this relation will be quoted, because list_relations_without_caching (by default) uses
    +        # quote_policy = {"database": True, "schema": True, "identifier": True}
    +        # when adding relations to the cache.
    +        run_dbt(run_args)
    +        adapter = project.adapter
    +        assert len(adapter.cache.relations) == 1
    +        second_relation = list(adapter.cache.relations).pop()
    +
    +        # perform a case-insensitive + quote-insensitive comparison
    +        for key in ["database", "schema", "identifier"]:
    +            assert getattr(relation, key).lower() == getattr(second_relation, key).lower()
    +
    +    def test_cache(self, project):
    +        self.run_and_inspect_cache(project, run_args=["run"])
    +
    +
    +class BaseCachingLowercaseModel(BaseCachingTest):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model.sql": model_sql,
    +        }
    +
    +
    +class BaseCachingUppercaseModel(BaseCachingTest):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "MODEL.sql": model_sql,
    +        }
    +
    +
    +class BaseCachingSelectedSchemaOnly(BaseCachingTest):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model.sql": model_sql,
    +            "another_schema_model.sql": another_schema_model_sql,
    +        }
    +
    +    def test_cache(self, project):
    +        # this should only cache the schema containing the selected model
    +        run_args = ["--cache-selected-only", "run", "--select", "model"]
    +        self.run_and_inspect_cache(project, run_args)
    +
    +
    +class TestCachingLowerCaseModel(BaseCachingLowercaseModel):
    +    pass
    +
    +
    +class TestCachingUppercaseModel(BaseCachingUppercaseModel):
    +    pass
    +
    +
    +class TestCachingSelectedSchemaOnly(BaseCachingSelectedSchemaOnly):
    +    pass
    diff --git a/test/integration/056_column_type_tests/macros/test_is_type.sql b/tests/adapter/dbt/tests/adapter/column_types/fixtures.py
    similarity index 73%
    rename from test/integration/056_column_type_tests/macros/test_is_type.sql
    rename to tests/adapter/dbt/tests/adapter/column_types/fixtures.py
    index 2f1ffde2b1e..97a61c2b6f5 100644
    --- a/test/integration/056_column_type_tests/macros/test_is_type.sql
    +++ b/tests/adapter/dbt/tests/adapter/column_types/fixtures.py
    @@ -1,4 +1,13 @@
    +# macros
    +macro_test_alter_column_type = """
    +-- Macro to alter a column type
    +{% macro test_alter_column_type(model_name, column_name, new_column_type) %}
    +  {% set relation = ref(model_name) %}
    +  {{ alter_column_type(relation, column_name, new_column_type) }}
    +{% endmacro %}
    +"""
     
    +macro_test_is_type_sql = """
     {% macro simple_type_check_column(column, check) %}
         {% if check == 'string' %}
             {{ return(column.is_string()) }}
    @@ -70,3 +79,35 @@
         {% endfor %}
           select * from (select 1 limit 0) as nothing
     {% endtest %}
    +"""
    +
    +# models/schema
    +
    +model_sql = """
    +select
    +    1::smallint as smallint_col,
    +    2::integer as int_col,
    +    3::bigint as bigint_col,
    +    4.0::real as real_col,
    +    5.0::double precision as double_col,
    +    6.0::numeric as numeric_col,
    +    '7'::text as text_col,
    +    '8'::varchar(20) as varchar_col
    +"""
    +
    +schema_yml = """
    +version: 2
    +models:
    +  - name: model
    +    tests:
    +      - is_type:
    +          column_map:
    +            smallint_col: ['integer', 'number']
    +            int_col: ['integer', 'number']
    +            bigint_col: ['integer', 'number']
    +            real_col: ['float', 'number']
    +            double_col: ['float', 'number']
    +            numeric_col: ['numeric', 'number']
    +            text_col: ['string', 'not number']
    +            varchar_col: ['string', 'not number']
    +"""
    diff --git a/tests/adapter/dbt/tests/adapter/column_types/test_column_types.py b/tests/adapter/dbt/tests/adapter/column_types/test_column_types.py
    new file mode 100644
    index 00000000000..cc213d36a4b
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/column_types/test_column_types.py
    @@ -0,0 +1,24 @@
    +import pytest
    +from dbt.tests.util import run_dbt
    +from dbt.tests.adapter.column_types.fixtures import macro_test_is_type_sql, model_sql, schema_yml
    +
    +
    +class BaseColumnTypes:
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {"test_is_type.sql": macro_test_is_type_sql}
    +
    +    def run_and_test(self):
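+        # each concrete test class supplies one model and one is_type schema test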
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        results = run_dbt(["test"])
    +        assert len(results) == 1
    +
    +
    +class TestPostgresColumnTypes(BaseColumnTypes):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model.sql": model_sql, "schema.yml": schema_yml}
    +
    +    def test_run_and_test(self, project):
    +        self.run_and_test()
    diff --git a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py
    index b7b0ff9ac17..8d3fd7751f2 100644
    --- a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py
    +++ b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py
    @@ -21,7 +21,7 @@ def capsys(self, capsys):
         def assertGotValue(self, linepat, result):
             found = False
             output = self.capsys.readouterr().out
    -        for line in output.split('\n'):
    +        for line in output.split("\n"):
                 if linepat.match(line):
                     found = True
                     assert result in line
    @@ -41,10 +41,7 @@ def check_project(self, splitout, msg="ERROR invalid"):
     class BaseDebugProfileVariable(BaseDebug):
         @pytest.fixture(scope="class")
         def project_config_update(self):
    -        return {
    -            "config-version": 2,
    -            "profile": '{{ "te" ~ "st" }}'
    -        }
    +        return {"config-version": 2, "profile": '{{ "te" ~ "st" }}'}
     
     
     class TestDebugPostgres(BaseDebug):
    @@ -70,7 +67,6 @@ class TestDebugProfileVariablePostgres(BaseDebugProfileVariable):
     
     
     class TestDebugInvalidProjectPostgres(BaseDebug):
    -
         def test_empty_project(self, project):
             with open("dbt_project.yml", "w") as f:  # noqa: F841
                 pass
    @@ -96,9 +92,7 @@ def test_not_found_project(self, project):
     
         def test_invalid_project_outside_current_dir(self, project):
             # create a dbt_project.yml
    -        project_config = {
    -            "invalid-key": "not a valid key in this project"
    -        }
    +        project_config = {"invalid-key": "not a valid key in this project"}
             os.makedirs("custom", exist_ok=True)
             with open("custom/dbt_project.yml", "w") as f:
                 yaml.safe_dump(project_config, f, default_flow_style=True)
    diff --git a/tests/adapter/dbt/tests/adapter/incremental/fixtures.py b/tests/adapter/dbt/tests/adapter/incremental/fixtures.py
    new file mode 100644
    index 00000000000..6e130266df2
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/incremental/fixtures.py
    @@ -0,0 +1,305 @@
    +#
    +# Models
    +#
    +_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY = """
    +{{
    +    config(
    +        materialized='incremental',
    +        unique_key='id',
    +        on_schema_change='sync_all_columns'
    +    )
    +}}
    +
    +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
    +
    +{% set string_type = dbt.type_string() %}
    +
    +{% if is_incremental() %}
    +
    +SELECT id,
    +       cast(field1 as {{string_type}}) as field1
    +
    +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
    +
    +{% else %}
    +
    +select id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field2 as {{string_type}}) as field2
    +
    +from source_data where id <= 3
    +
    +{% endif %}
    +"""
    +
    +_MODELS__INCREMENTAL_IGNORE = """
    +{{
    +    config(
    +        materialized='incremental',
    +        unique_key='id',
    +        on_schema_change='ignore'
    +    )
    +}}
    +
    +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
    +
    +{% if is_incremental() %}
    +
    +SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
    +
    +{% else %}
    +
    +SELECT id, field1, field2 FROM source_data LIMIT 3
    +
    +{% endif %}
    +"""
    +
    +_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET = """
    +{{
    +    config(materialized='table')
    +}}
    +
    +with source_data as (
    +
    +    select * from {{ ref('model_a') }}
    +
    +)
    +
    +{% set string_type = dbt.type_string() %}
    +
    +select id
    +       ,cast(field1 as {{string_type}}) as field1
    +
    +from source_data
    +order by id
    +"""
    +
    +_MODELS__INCREMENTAL_IGNORE_TARGET = """
    +{{
    +    config(materialized='table')
    +}}
    +
    +with source_data as (
    +
    +    select * from {{ ref('model_a') }}
    +
    +)
    +
    +select id
    +       ,field1
    +       ,field2
    +
    +from source_data
    +"""
    +
    +_MODELS__INCREMENTAL_FAIL = """
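+-- first (full-refresh) run builds (id, field1, field3); the incremental run selects field2 instead,
+-- so on_schema_change='fail' raises a compilation error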
    +{{
    +    config(
    +        materialized='incremental',
    +        unique_key='id',
    +        on_schema_change='fail'
    +    )
    +}}
    +
    +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
    +
+{% if is_incremental() %}
    +
    +SELECT id, field1, field2 FROM source_data
    +
    +{% else %}
    +
+SELECT id, field1, field3 FROM source_data
    +
    +{% endif %}
    +"""
    +
    +_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS = """
    +{{
    +    config(
    +        materialized='incremental',
    +        unique_key='id',
    +        on_schema_change='sync_all_columns'
    +    )
    +}}
    +
    +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
    +
    +{% set string_type = dbt.type_string() %}
    +
    +{% if is_incremental() %}
    +
    +SELECT id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field3 as {{string_type}}) as field3, -- to validate new fields
    +       cast(field4 as {{string_type}}) AS field4 -- to validate new fields
    +
    +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
    +
    +{% else %}
    +
    +select id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field2 as {{string_type}}) as field2
    +
    +from source_data where id <= 3
    +
    +{% endif %}
    +"""
    +
    +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE = """
    +{{
    +    config(
    +        materialized='incremental',
    +        unique_key='id',
    +        on_schema_change='append_new_columns'
    +    )
    +}}
    +
    +{% set string_type = dbt.type_string() %}
    +
    +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
    +
+{% if is_incremental() %}
    +
    +SELECT id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field3 as {{string_type}}) as field3,
    +       cast(field4 as {{string_type}}) as field4
    +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
    +
    +{% else %}
    +
    +SELECT id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field2 as {{string_type}}) as field2
    +FROM source_data where id <= 3
    +
    +{% endif %}
    +"""
    +
    +_MODELS__A = """
    +{{
    +    config(materialized='table')
    +}}
    +
    +with source_data as (
    +
    +    select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4
    +    union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4
    +    union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4
    +    union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4
    +    union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 as field3, 'XXX' as field4
    +    union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4
    +
    +)
    +
    +select id
    +       ,field1
    +       ,field2
    +       ,field3
    +       ,field4
    +
    +from source_data
    +"""
    +
    +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """
    +{{
    +    config(materialized='table')
    +}}
    +
    +{% set string_type = dbt.type_string() %}
    +
    +with source_data as (
    +
    +    select * from {{ ref('model_a') }}
    +
    +)
    +
    +select id
    +       ,cast(field1 as {{string_type}}) as field1
    +       ,cast(field2 as {{string_type}}) as field2
    +       ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3
    +       ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4
    +
    +from source_data
    +"""
    +
    +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS = """
    +{{
    +    config(
    +        materialized='incremental',
    +        unique_key='id',
    +        on_schema_change='append_new_columns'
    +    )
    +}}
    +
    +{% set string_type = dbt.type_string() %}
    +
    +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )
    +
+{% if is_incremental() %}
    +
    +SELECT id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field2 as {{string_type}}) as field2,
    +       cast(field3 as {{string_type}}) as field3,
    +       cast(field4 as {{string_type}}) as field4
    +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )
    +
    +{% else %}
    +
    +SELECT id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(field2 as {{string_type}}) as field2
    +FROM source_data where id <= 3
    +
    +{% endif %}
    +"""
    +
    +_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """
    +{{
    +    config(materialized='table')
    +}}
    +
    +with source_data as (
    +
    +    select * from {{ ref('model_a') }}
    +
    +)
    +
    +{% set string_type = dbt.type_string() %}
    +
    +select id
    +       ,cast(field1 as {{string_type}}) as field1
    +       --,field2
    +       ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3
    +       ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4
    +
    +from source_data
    +order by id
    +"""
    +
    +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET = """
    +{{
    +    config(materialized='table')
    +}}
    +
    +{% set string_type = dbt.type_string() %}
    +
    +with source_data as (
    +
    +    select * from {{ ref('model_a') }}
    +
    +)
    +
    +select id,
    +       cast(field1 as {{string_type}}) as field1,
    +       cast(CASE WHEN id >  3 THEN NULL ELSE field2 END as {{string_type}}) AS field2,
    +       cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3,
    +       cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4
    +
    +from source_data
    +"""
    diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py
    new file mode 100644
    index 00000000000..4fbefbe7651
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py
    @@ -0,0 +1,104 @@
    +import pytest
    +
    +from dbt.tests.util import (
    +    check_relations_equal,
    +    run_dbt,
    +)
    +
    +from dbt.tests.adapter.incremental.fixtures import (
    +    _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY,
    +    _MODELS__INCREMENTAL_IGNORE,
    +    _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET,
    +    _MODELS__INCREMENTAL_IGNORE_TARGET,
    +    _MODELS__INCREMENTAL_FAIL,
    +    _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS,
    +    _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE,
    +    _MODELS__A,
    +    _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET,
    +    _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS,
    +    _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET,
    +    _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET,
    +)
    +
    +
    +class BaseIncrementalOnSchemaChangeSetup:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY,
    +            "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE,
    +            "incremental_sync_remove_only_target.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET,
    +            "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET,
    +            "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL,
    +            "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS,
    +            "incremental_append_new_columns_remove_one.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE,
    +            "model_a.sql": _MODELS__A,
    +            "incremental_append_new_columns_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET,
    +            "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS,
    +            "incremental_sync_all_columns_target.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET,
    +            "incremental_append_new_columns_remove_one_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET,
    +        }
    +
    +    def run_twice_and_assert(self, include, compare_source, compare_target, project):
    +        # dbt run (twice)
    +        run_args = ["run"]
    +        if include:
    +            run_args.extend(("--select", include))
    +        results_one = run_dbt(run_args)
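+        # each scenario selects three nodes: model_a, the incremental model, and its target table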
    +        assert len(results_one) == 3
    +
    +        results_two = run_dbt(run_args)
    +        assert len(results_two) == 3
    +
    +        check_relations_equal(project.adapter, [compare_source, compare_target])
    +
    +    def run_incremental_append_new_columns(self, project):
    +        select = "model_a incremental_append_new_columns incremental_append_new_columns_target"
    +        compare_source = "incremental_append_new_columns"
    +        compare_target = "incremental_append_new_columns_target"
    +        self.run_twice_and_assert(select, compare_source, compare_target, project)
    +
    +    def run_incremental_append_new_columns_remove_one(self, project):
    +        select = "model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target"
    +        compare_source = "incremental_append_new_columns_remove_one"
    +        compare_target = "incremental_append_new_columns_remove_one_target"
    +        self.run_twice_and_assert(select, compare_source, compare_target, project)
    +
    +    def run_incremental_sync_all_columns(self, project):
    +        select = "model_a incremental_sync_all_columns incremental_sync_all_columns_target"
    +        compare_source = "incremental_sync_all_columns"
    +        compare_target = "incremental_sync_all_columns_target"
    +        self.run_twice_and_assert(select, compare_source, compare_target, project)
    +
    +    def run_incremental_sync_remove_only(self, project):
    +        select = "model_a incremental_sync_remove_only incremental_sync_remove_only_target"
    +        compare_source = "incremental_sync_remove_only"
    +        compare_target = "incremental_sync_remove_only_target"
    +        self.run_twice_and_assert(select, compare_source, compare_target, project)
    +
    +
    +class BaseIncrementalOnSchemaChange(BaseIncrementalOnSchemaChangeSetup):
    +    def test_run_incremental_ignore(self, project):
    +        select = "model_a incremental_ignore incremental_ignore_target"
    +        compare_source = "incremental_ignore"
    +        compare_target = "incremental_ignore_target"
    +        self.run_twice_and_assert(select, compare_source, compare_target, project)
    +
    +    def test_run_incremental_append_new_columns(self, project):
    +        self.run_incremental_append_new_columns(project)
    +        self.run_incremental_append_new_columns_remove_one(project)
    +
    +    def test_run_incremental_sync_all_columns(self, project):
    +        self.run_incremental_sync_all_columns(project)
    +        self.run_incremental_sync_remove_only(project)
    +
    +    def test_run_incremental_fail_on_schema_change(self, project):
    +        select = "model_a incremental_fail"
    +        run_dbt(["run", "--models", select, "--full-refresh"])
    +        results_two = run_dbt(["run", "--models", select], expect_pass=False)
    +        assert "Compilation Error" in results_two[1].message
    +
    +
    +class TestIncrementalOnSchemaChange(BaseIncrementalOnSchemaChange):
    +    pass
    diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py
    index 11a4b6c0384..2060e9eb6d4 100644
    --- a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py
    +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py
    @@ -64,10 +64,8 @@ def seeds(self):
         def project_config_update(self):
             return {
                 "models": {
    -                "+incremental_predicates": [
    -                    "id != 2"
    -                ],
    -                "+incremental_strategy": "delete+insert"
    +                "+incremental_predicates": ["id != 2"],
    +                "+incremental_strategy": "delete+insert",
                 }
             }
     
    @@ -123,16 +121,21 @@ def get_expected_fields(self, relation, seed_rows, opt_model_count=None):
                 inc_test_model_count=1,
                 seed_rows=seed_rows,
                 opt_model_count=opt_model_count,
    -            relation=relation
    +            relation=relation,
             )
     
         # no unique_key test
         def test__incremental_predicates(self, project):
             """seed should match model after two incremental runs"""
     
    -        expected_fields = self.get_expected_fields(relation="expected_delete_insert_incremental_predicates", seed_rows=4)
    +        expected_fields = self.get_expected_fields(
    +            relation="expected_delete_insert_incremental_predicates", seed_rows=4
    +        )
             test_case_fields = self.get_test_fields(
    -            project, seed="expected_delete_insert_incremental_predicates", incremental_model="delete_insert_incremental_predicates", update_sql_file=None
    +            project,
    +            seed="expected_delete_insert_incremental_predicates",
    +            incremental_model="delete_insert_incremental_predicates",
    +            update_sql_file=None,
             )
             self.check_scenario_correctness(expected_fields, test_case_fields, project)
     
    @@ -144,11 +147,4 @@ class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates):
     class TestPredicatesDeleteInsert(BaseIncrementalPredicates):
         @pytest.fixture(scope="class")
         def project_config_update(self):
    -        return {
    -            "models": {
    -                "+predicates": [
    -                    "id != 2"
    -                ],
    -                "+incremental_strategy": "delete+insert"
    -            }
    -        }
    +        return {"models": {"+predicates": ["id != 2"], "+incremental_strategy": "delete+insert"}}
    diff --git a/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py b/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py
    index b764568fe16..053fcc506c8 100644
    --- a/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py
    +++ b/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py
    @@ -1,6 +1,6 @@
     import pytest
     import json
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     from dbt.version import __version__ as dbt_version
     from dbt.tests.util import run_dbt_and_capture
     from dbt.tests.adapter.query_comment.fixtures import MACROS__MACRO_SQL, MODELS__X_SQL
    @@ -77,7 +77,7 @@ def project_config_update(self):
             return {"query-comment": "{{ invalid_query_header() }}"}
     
         def run_assert_comments(self):
    -        with pytest.raises(RuntimeException):
    +        with pytest.raises(DbtRuntimeError):
                 self.run_get_json(expect_pass=False)
     
     
    diff --git a/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py b/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py
    index 38515bc0206..2eeb5aea64d 100644
    --- a/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py
    +++ b/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py
    @@ -1,5 +1,3 @@
    -
    -
     from typing import List, Optional
     import pytest
     
    @@ -20,12 +18,10 @@
     class BaseChangeRelationTypeValidator:
         @pytest.fixture(scope="class")
         def models(self):
    -        return {
    -            "model_mc_modelface.sql": _DEFAULT_CHANGE_RELATION_TYPE_MODEL
    -        }
    +        return {"model_mc_modelface.sql": _DEFAULT_CHANGE_RELATION_TYPE_MODEL}
     
         def _run_and_check_materialization(self, materialization, extra_args: Optional[List] = None):
    -        run_args = ["run", '--vars', f'materialized: {materialization}']
    +        run_args = ["run", "--vars", f"materialized: {materialization}"]
             if extra_args:
                 run_args.extend(extra_args)
             results = run_dbt(run_args)
    @@ -33,11 +29,11 @@ def _run_and_check_materialization(self, materialization, extra_args: Optional[L
             assert len(results) == 1
     
         def test_changing_materialization_changes_relation_type(self, project):
    -        self._run_and_check_materialization('view')
    -        self._run_and_check_materialization('table')
    -        self._run_and_check_materialization('view')
    -        self._run_and_check_materialization('incremental')
    -        self._run_and_check_materialization('table', extra_args=['--full-refresh'])
    +        self._run_and_check_materialization("view")
    +        self._run_and_check_materialization("table")
    +        self._run_and_check_materialization("view")
    +        self._run_and_check_materialization("incremental")
    +        self._run_and_check_materialization("table", extra_args=["--full-refresh"])
     
     
     class TestChangeRelationTypes(BaseChangeRelationTypeValidator):
    diff --git a/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py b/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py
    index d7e7148b886..aeaaaa44193 100644
    --- a/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py
    +++ b/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py
    @@ -1,15 +1,37 @@
     # escape_single_quotes
     
     models__test_escape_single_quotes_quote_sql = """
    -select '{{ escape_single_quotes("they're") }}' as actual, 'they''re' as expected union all
    -select '{{ escape_single_quotes("they are") }}' as actual, 'they are' as expected
    +select
    +  '{{ escape_single_quotes("they're") }}' as actual,
    +  'they''re' as expected,
    +  {{ length(string_literal(escape_single_quotes("they're"))) }} as actual_length,
    +  7 as expected_length
    +
    +union all
    +
    +select
    +  '{{ escape_single_quotes("they are") }}' as actual,
    +  'they are' as expected,
    +  {{ length(string_literal(escape_single_quotes("they are"))) }} as actual_length,
    +  8 as expected_length
     """
     
     
     # The expected literal is 'they\'re'. The second backslash is to escape it from Python.
     models__test_escape_single_quotes_backslash_sql = """
    -select '{{ escape_single_quotes("they're") }}' as actual, 'they\\'re' as expected union all
    -select '{{ escape_single_quotes("they are") }}' as actual, 'they are' as expected
    +select
    +  '{{ escape_single_quotes("they're") }}' as actual,
    +  'they\\'re' as expected,
    +  {{ length(string_literal(escape_single_quotes("they're"))) }} as actual_length,
    +  7 as expected_length
    +
    +union all
    +
    +select
    +  '{{ escape_single_quotes("they are") }}' as actual,
    +  'they are' as expected,
    +  {{ length(string_literal(escape_single_quotes("they are"))) }} as actual_length,
    +  8 as expected_length
     """
     
     
    @@ -21,4 +43,7 @@
           - assert_equal:
               actual: actual
               expected: expected
    +      - assert_equal:
    +          actual: actual_length
    +          expected: expected_length
     """
    diff --git a/tests/adapter/setup.py b/tests/adapter/setup.py
    index f9ac627e445..c4c1e393483 100644
    --- a/tests/adapter/setup.py
    +++ b/tests/adapter/setup.py
    @@ -20,7 +20,7 @@
     
     
     package_name = "dbt-tests-adapter"
    -package_version = "1.4.0b1"
    +package_version = "1.5.0a1"
     description = """The dbt adapter tests for adapter plugins"""
     
     this_directory = os.path.abspath(os.path.dirname(__file__))
    diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py
    index 51a6b633e40..6e1e8e89af5 100644
    --- a/tests/functional/artifacts/expected_manifest.py
    +++ b/tests/functional/artifacts/expected_manifest.py
    @@ -1061,9 +1061,7 @@ def expected_references_manifest(project):
                     "unique_id": "seed.test.seed",
                     "checksum": checksum_file(seed_path),
                     "unrendered_config": get_unrendered_seed_config(),
    -                "relation_name": '"{0}"."{1}".seed'.format(
    -                    project.database, my_schema_name
    -                ),
    +                "relation_name": '"{0}"."{1}".seed'.format(project.database, my_schema_name),
                 },
                 "snapshot.test.snapshot_seed": {
                     "alias": "snapshot_seed",
    @@ -1244,9 +1242,7 @@ def expected_references_manifest(project):
                     "unique_id": "doc.test.table_info",
                 },
                 "doc.test.view_summary": {
    -                "block_contents": (
    -                    "A view of the summary of the ephemeral copy of the seed data"
    -                ),
    +                "block_contents": ("A view of the summary of the ephemeral copy of the seed data"),
                     "resource_type": "doc",
                     "name": "view_summary",
                     "original_file_path": docs_path,
    diff --git a/tests/functional/artifacts/test_override.py b/tests/functional/artifacts/test_override.py
    index 46a037bdcc5..a7b689a3670 100644
    --- a/tests/functional/artifacts/test_override.py
    +++ b/tests/functional/artifacts/test_override.py
    @@ -1,6 +1,6 @@
     import pytest
     from dbt.tests.util import run_dbt
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     
     model_sql = """
     select 1 as id
    @@ -30,6 +30,6 @@ def test_override_used(
             results = run_dbt(["run"])
             assert len(results) == 1
             # this should pick up our failure macro and raise a compilation exception
    -        with pytest.raises(CompilationException) as excinfo:
    +        with pytest.raises(CompilationError) as excinfo:
                 run_dbt(["--warn-error", "docs", "generate"])
             assert "rejected: no catalogs for you" in str(excinfo.value)
    diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py
    index a7a7ed5417c..84fd8bab360 100644
    --- a/tests/functional/artifacts/test_previous_version_state.py
    +++ b/tests/functional/artifacts/test_previous_version_state.py
    @@ -2,7 +2,7 @@
     import os
     import shutil
     from dbt.tests.util import run_dbt
    -from dbt.exceptions import IncompatibleSchemaException
    +from dbt.exceptions import IncompatibleSchemaError
     from dbt.contracts.graph.manifest import WritableManifest
     
     # This is a *very* simple project, with just one model in it.
    @@ -84,7 +84,7 @@ def compare_previous_state(
                 results = run_dbt(cli_args, expect_pass=expect_pass)
                 assert len(results) == 0
             else:
    -            with pytest.raises(IncompatibleSchemaException):
    +            with pytest.raises(IncompatibleSchemaError):
                     run_dbt(cli_args, expect_pass=expect_pass)
     
         def test_compare_state_current(self, project):
    diff --git a/tests/functional/basic/test_invalid_reference.py b/tests/functional/basic/test_invalid_reference.py
    index 8a516027940..1c54d1b906a 100644
    --- a/tests/functional/basic/test_invalid_reference.py
    +++ b/tests/functional/basic/test_invalid_reference.py
    @@ -1,6 +1,6 @@
     import pytest
     from dbt.tests.util import run_dbt
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     
     
     descendant_sql = """
    @@ -24,5 +24,5 @@ def models():
     
     def test_undefined_value(project):
         # Tests that a project with an invalid reference fails
    -    with pytest.raises(CompilationException):
    +    with pytest.raises(CompilationError):
             run_dbt(["compile"])
    diff --git a/tests/functional/build/fixtures.py b/tests/functional/build/fixtures.py
    new file mode 100644
    index 00000000000..7c4d93e6186
    --- /dev/null
    +++ b/tests/functional/build/fixtures.py
    @@ -0,0 +1,268 @@
    +seeds__country_csv = """iso3,name,iso2,iso_numeric,cow_alpha,cow_numeric,fao_code,un_code,wb_code,imf_code,fips,geonames_name,geonames_id,r_name,aiddata_name,aiddata_code,oecd_name,oecd_code,historical_name,historical_iso3,historical_iso2,historical_iso_numeric
    +ABW,Aruba,AW,533,,,,533,ABW,314,AA,Aruba,3577279,ARUBA,Aruba,12,Aruba,373,,,,
    +AFG,Afghanistan,AF,4,AFG,700,2,4,AFG,512,AF,Afghanistan,1149361,AFGHANISTAN,Afghanistan,1,Afghanistan,625,,,,
    +AGO,Angola,AO,24,ANG,540,7,24,AGO,614,AO,Angola,3351879,ANGOLA,Angola,7,Angola,225,,,,
    +AIA,Anguilla,AI,660,,,,660,AIA,312,AV,Anguilla,3573511,ANGUILLA,Anguilla,8,Anguilla,376,,,,
    +ALA,Aland Islands,AX,248,,,,248,ALA,,,Aland Islands,661882,ALAND ISLANDS,,,,,,,,
    +ALB,Albania,AL,8,ALB,339,3,8,ALB,914,AL,Albania,783754,ALBANIA,Albania,3,Albania,71,,,,
    +AND,Andorra,AD,20,AND,232,6,20,ADO,,AN,Andorra,3041565,ANDORRA,,,,,,,,
    +ANT,Netherlands Antilles,AN,530,,,,,ANT,353,NT,Netherlands Antilles,,NETHERLANDS ANTILLES,Netherlands Antilles,211,Netherlands Antilles,361,Netherlands Antilles,ANT,AN,530
    +ARE,United Arab Emirates,AE,784,UAE,696,225,784,ARE,466,AE,United Arab Emirates,290557,UNITED ARAB EMIRATES,United Arab Emirates,140,United Arab Emirates,576,,,,
    +"""
    +
    +snapshots__snap_0 = """
    +{% snapshot snap_0 %}
    +
    +{{
    +    config(
    +      target_database=database,
    +      target_schema=schema,
    +      unique_key='iso3',
    +
    +      strategy='timestamp',
    +      updated_at='snap_0_updated_at',
    +    )
    +}}
    +
    +select *, current_timestamp as snap_0_updated_at from {{ ref('model_0') }}
    +
    +{% endsnapshot %}
    +"""
    +
    +snapshots__snap_1 = """
    +{% snapshot snap_1 %}
    +
    +{{
    +    config(
    +      target_database=database,
    +      target_schema=schema,
    +      unique_key='iso3',
    +
    +      strategy='timestamp',
    +      updated_at='snap_1_updated_at',
    +    )
    +}}
    +
    +SELECT
    +  iso3,
    +  name,
    +  iso2,
    +  iso_numeric,
    +  cow_alpha,
    +  cow_numeric,
    +  fao_code,
    +  un_code,
    +  wb_code,
    +  imf_code,
    +  fips,
    +  geonames_name,
    +  geonames_id,
    +  r_name,
    +  aiddata_name,
    +  aiddata_code,
    +  oecd_name,
    +  oecd_code,
    +  historical_name,
    +  historical_iso3,
    +  historical_iso2,
    +  historical_iso_numeric,
    +  current_timestamp as snap_1_updated_at from {{ ref('model_1') }}
    +
    +{% endsnapshot %}
    +"""
    +
    +snapshots__snap_99 = """
    +{% snapshot snap_99 %}
    +
    +{{
    +    config(
    +      target_database=database,
    +      target_schema=schema,
    +      strategy='timestamp',
    +      unique_key='num',
    +      updated_at='snap_99_updated_at',
    +    )
    +}}
    +
    +select *, current_timestamp as snap_99_updated_at from {{ ref('model_99') }}
    +
    +{% endsnapshot %}
    +"""
    +
    +models__model_0_sql = """
    +{{ config(materialized='table') }}
    +
    +select * from {{ ref('countries') }}
    +"""
    +
    +models__model_1_sql = """
    +{{ config(materialized='table') }}
    +
    +select * from {{ ref('snap_0') }}
    +"""
    +
    +models__model_2_sql = """
    +{{ config(materialized='table') }}
    +
    +select * from {{ ref('snap_1') }}
    +"""
    +
    +models__model_3_sql = """
    +{{ config(materialized='table') }}
    +
    +select * from {{ ref('model_1') }}
    +"""
    +
    +models__model_99_sql = """
    +{{ config(materialized='table') }}
    +
    +select '1' as "num"
    +"""
    +
    +models__test_yml = """
    +version: 2
    +
    +models:
    +  - name: model_0
    +    columns:
    +      - name: iso3
    +        tests:
    +          - unique
    +          - not_null
    +  - name: model_2
    +    columns:
    +      - name: iso3
    +        tests:
    +          - unique
    +          - not_null
    +"""
    +
    +models_failing_tests__tests_yml = """
    +version: 2
    +
    +models:
    +  - name: model_0
    +    columns:
    +      - name: iso3
    +        tests:
    +          - unique
    +          - not_null
    +      - name: historical_iso_numeric
    +        tests:
    +          - not_null
    +  - name: model_2
    +    columns:
    +      - name: iso3
    +        tests:
    +          - unique
    +          - not_null
    +"""
    +
    +models_failing__model_1_sql = """
    +{{ config(materialized='table') }}
    +
    +select bad_column from {{ ref('snap_0') }}
    +"""
    +
    +
    +models_circular_relationship__test_yml = """
    +version: 2
    +
    +models:
    +  - name: model_0
    +    columns:
    +      - name: iso3
    +        tests:
    +          - relationships:
    +              to: ref('model_1')
    +              field: iso3
    +
    +  - name: model_1
    +    columns:
    +      - name: iso3
    +        tests:
    +          - relationships:
    +              to: ref('model_0')
    +              field: iso3
    +
    +"""
    +
    +models_simple_blocking__model_a_sql = """
    +select null as id
    +"""
    +
    +models_simple_blocking__model_b_sql = """
    +select * from {{ ref('model_a') }}
    +"""
    +
    +models_simple_blocking__test_yml = """
    +version: 2
    +
    +models:
    +  - name: model_a
    +    columns:
    +      - name: id
    +        tests:
    +          - not_null
    +"""
    +
    +models_interdependent__model_a_sql = """
    +select 1 as id
    +"""
    +
    +models_interdependent__model_b_sql = """
    +select * from {{ ref('model_a') }}
    +"""
    +
    +models_interdependent__model_b_null_sql = """
    +select null from {{ ref('model_a') }}
    +"""
    +
    +
    +models_interdependent__model_c_sql = """
    +select * from {{ ref('model_b') }}
    +"""
    +
    +models_interdependent__test_yml = """
    +version: 2
    +
    +models:
    +  - name: model_a
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +          - not_null
    +          - relationships:
    +              to: ref('model_b')
    +              field: id
    +          - relationships:
    +              to: ref('model_c')
    +              field: id
    +
    +  - name: model_b
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +          - not_null
    +          - relationships:
    +              to: ref('model_a')
    +              field: id
    +          - relationships:
    +              to: ref('model_c')
    +              field: id
    +
    +  - name: model_c
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +          - not_null
    +          - relationships:
    +              to: ref('model_a')
    +              field: id
    +          - relationships:
    +              to: ref('model_b')
    +              field: id
    +"""
    diff --git a/tests/functional/build/test_build.py b/tests/functional/build/test_build.py
    new file mode 100644
    index 00000000000..eb9529be102
    --- /dev/null
    +++ b/tests/functional/build/test_build.py
    @@ -0,0 +1,198 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +from tests.functional.build.fixtures import (
    +    seeds__country_csv,
    +    snapshots__snap_0,
    +    snapshots__snap_1,
    +    snapshots__snap_99,
    +    models__test_yml,
    +    models__model_0_sql,
    +    models__model_1_sql,
    +    models__model_2_sql,
    +    models__model_3_sql,
    +    models__model_99_sql,
    +    models_failing__model_1_sql,
    +    models_circular_relationship__test_yml,
    +    models_failing_tests__tests_yml,
    +    models_simple_blocking__model_a_sql,
    +    models_simple_blocking__model_b_sql,
    +    models_simple_blocking__test_yml,
    +    models_interdependent__test_yml,
    +    models_interdependent__model_a_sql,
    +    models_interdependent__model_b_sql,
    +    models_interdependent__model_b_null_sql,
    +    models_interdependent__model_c_sql,
    +)
    +
    +
    +class TestBuildBase:
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"countries.csv": seeds__country_csv}
    +
    +    @pytest.fixture(scope="class")
    +    def snapshots(self):
    +        return {
    +            "snap_0.sql": snapshots__snap_0,
    +            "snap_1.sql": snapshots__snap_1,
    +            "snap_99.sql": snapshots__snap_99,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "quote_columns": False,
    +            },
    +        }
    +
    +
    +class TestPassingBuild(TestBuildBase):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_0.sql": models__model_0_sql,
    +            "model_1.sql": models__model_1_sql,
    +            "model_2.sql": models__model_2_sql,
    +            "model_99.sql": models__model_99_sql,
    +            "test.yml": models__test_yml,
    +        }
    +
    +    def test_build_happy_path(self, project):
    +        run_dbt(["build"])
    +
    +
    +class TestFailingBuild(TestBuildBase):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_0.sql": models__model_0_sql,
    +            "model_1.sql": models_failing__model_1_sql,
    +            "model_2.sql": models__model_2_sql,
    +            "model_3.sql": models__model_3_sql,
    +            "model_99.sql": models__model_99_sql,
    +            "test.yml": models__test_yml,
    +        }
    +
    +    def test_failing_test_skips_downstream(self, project):
    +        results = run_dbt(["build"], expect_pass=False)
    +        assert len(results) == 13
    +        actual = [str(r.status) for r in results]
    +        expected = ["error"] * 1 + ["skipped"] * 5 + ["pass"] * 2 + ["success"] * 5
    +
    +        assert sorted(actual) == sorted(expected)
    +
    +
    +class TestFailingTestsBuild(TestBuildBase):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_0.sql": models__model_0_sql,
    +            "model_1.sql": models__model_1_sql,
    +            "model_2.sql": models__model_2_sql,
    +            "model_99.sql": models__model_99_sql,
    +            "test.yml": models_failing_tests__tests_yml,
    +        }
    +
    +    def test_failing_test_skips_downstream(self, project):
    +        results = run_dbt(["build"], expect_pass=False)
    +        assert len(results) == 13
    +        actual = [str(r.status) for r in results]
    +        expected = ["fail"] + ["skipped"] * 6 + ["pass"] * 2 + ["success"] * 4
    +        assert sorted(actual) == sorted(expected)
    +
    +
    +class TestCircularRelationshipTestsBuild(TestBuildBase):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_0.sql": models__model_0_sql,
    +            "model_1.sql": models__model_1_sql,
    +            "model_99.sql": models__model_99_sql,
    +            "test.yml": models_circular_relationship__test_yml,
    +        }
    +
    +    def test_circular_relationship_test_success(self, project):
    +        """Ensure that tests that refer to each other's model don't create
    +        a circular dependency."""
    +        results = run_dbt(["build"])
    +        actual = [str(r.status) for r in results]
    +        expected = ["success"] * 7 + ["pass"] * 2
    +
    +        assert sorted(actual) == sorted(expected)
    +
    +
    +class TestSimpleBlockingTest:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_a.sql": models_simple_blocking__model_a_sql,
    +            "model_b.sql": models_simple_blocking__model_b_sql,
    +            "test.yml": models_simple_blocking__test_yml,
    +        }
    +
    +    def test_simple_blocking_test(self, project):
    +        """Ensure that a failed test on model_a always blocks model_b"""
    +        results = run_dbt(["build"], expect_pass=False)
    +        actual = [r.status for r in results]
    +        expected = ["success", "fail", "skipped"]
    +        assert sorted(actual) == sorted(expected)
    +
    +
    +class TestInterdependentModels:
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"countries.csv": seeds__country_csv}
    +
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "quote_columns": False,
    +            },
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_a.sql": models_interdependent__model_a_sql,
    +            "model_b.sql": models_interdependent__model_b_sql,
    +            "model_c.sql": models_interdependent__model_c_sql,
    +            "test.yml": models_interdependent__test_yml,
    +        }
    +
    +    def test_interdependent_models(self, project):
    +        results = run_dbt(["build"])
    +        assert len(results) == 16
    +
    +
    +class TestInterdependentModelsFail:
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"countries.csv": seeds__country_csv}
    +
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "quote_columns": False,
    +            },
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_a.sql": models_interdependent__model_a_sql,
    +            "model_b.sql": models_interdependent__model_b_null_sql,
    +            "model_c.sql": models_interdependent__model_c_sql,
    +            "test.yml": models_interdependent__test_yml,
    +        }
    +
    +    def test_interdependent_models_fail(self, project):
    +        results = run_dbt(["build"], expect_pass=False)
    +        assert len(results) == 16
    +
    +        actual = [str(r.status) for r in results]
    +        expected = ["error"] * 4 + ["skipped"] * 7 + ["pass"] * 2 + ["success"] * 3
    +        assert sorted(actual) == sorted(expected)
    diff --git a/tests/functional/colors/test_colors.py b/tests/functional/colors/test_colors.py
    index 7e92e039506..f42591c2b6a 100644
    --- a/tests/functional/colors/test_colors.py
    +++ b/tests/functional/colors/test_colors.py
    @@ -16,7 +16,7 @@ def models():
     
     @pytest.fixture(scope="class")
     def project_config_update():
    -    return {'config-version': 2}
    +    return {"config-version": 2}
     
     
     class TestColors:
    diff --git a/tests/functional/column_quoting/test_column_quotes.py b/tests/functional/column_quoting/test_column_quotes.py
    new file mode 100644
    index 00000000000..362f7b0d4de
    --- /dev/null
    +++ b/tests/functional/column_quoting/test_column_quotes.py
    @@ -0,0 +1,100 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +
    +_MODELS__COLUMN_QUOTING_DEFAULT = """
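+{# column names are pre-quoted and mixed-case to exercise incremental runs with the default quoting config #}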
    +{% set col_a = '"col_A"' %}
    +{% set col_b = '"col_B"' %}
    +
    +{{
    +  config(
    +    materialized = 'incremental',
    +    unique_key = col_a,
    +  )
    +}}
    +
    +select
    +  {{ col_a }},
    +  {{ col_b }}
    +from {{ref('seed')}}
    +"""
    +
    +_MODELS__COLUMN_QUOTING_NO_QUOTING = """
    +{% set col_a = '"col_a"' %}
    +{% set col_b = '"col_b"' %}
    +
    +{{
    +  config(
    +    materialized = 'incremental',
    +    unique_key = col_a,
    +  )
    +}}
    +
    +select
    +  {{ col_a }},
    +  {{ col_b }}
    +from {{ref('seed')}}
    +"""
    +
    +_SEEDS_BASIC_SEED = """col_A,col_B
    +1,2
    +3,4
    +5,6
    +"""
    +
    +
    +class BaseColumnQuotingTest:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model.sql": _MODELS__COLUMN_QUOTING_DEFAULT}
    +
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"seed.csv": _SEEDS_BASIC_SEED}
    +
    +    @pytest.fixture(scope="function")
    +    def run_column_quotes(self, project):
    +        def fixt():
    +            results = run_dbt(["seed"])
    +            assert len(results) == 1
    +            results = run_dbt(["run"])
    +            assert len(results) == 1
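+            # the second run exercises the incremental path with the configured column quoting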
    +            results = run_dbt(["run"])
    +            assert len(results) == 1
    +
    +        return fixt
    +
    +
    +class TestColumnQuotingDefault(BaseColumnQuotingTest):
    +    def test_column_quotes(self, run_column_quotes):
    +        run_column_quotes()
    +
    +
    +class TestColumnQuotingEnabled(BaseColumnQuotingTest):
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "quote_columns": True,
    +            },
    +        }
    +
    +    def test_column_quotes(self, run_column_quotes):
    +        run_column_quotes()
    +
    +
    +class TestColumnQuotingDisabled(BaseColumnQuotingTest):
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model.sql": _MODELS__COLUMN_QUOTING_NO_QUOTING}
    +
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "quote_columns": False,
    +            },
    +        }
    +
    +    def test_column_quotes(self, run_column_quotes):
    +        run_column_quotes()
    diff --git a/tests/functional/configs/test_configs.py b/tests/functional/configs/test_configs.py
    index 489b60fbbb1..086ef455f18 100644
    --- a/tests/functional/configs/test_configs.py
    +++ b/tests/functional/configs/test_configs.py
    @@ -1,9 +1,8 @@
    -
     from hologram import ValidationError
     import pytest
     import os
     
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
     from dbt.tests.util import run_dbt, update_config_file, write_file, check_relations_equal
     from tests.functional.configs.fixtures import BaseConfigProject, simple_snapshot
     
    @@ -94,7 +93,11 @@ def test_seeds_materialization_proj_config(self, project):
     class TestInvalidSeedsMaterializationSchema(object):
         def test_seeds_materialization_schema_config(self, project):
             seeds_dir = os.path.join(project.project_root, "seeds")
    -        write_file("version: 2\nseeds:\n  - name: myseed\n    config:\n      materialized: table", seeds_dir, "schema.yml")
    +        write_file(
    +            "version: 2\nseeds:\n  - name: myseed\n    config:\n      materialized: table",
    +            seeds_dir,
    +            "schema.yml",
    +        )
             write_file("id1, id2\n1, 2", seeds_dir, "myseed.csv")
     
             with pytest.raises(ValidationError):
    @@ -109,14 +112,18 @@ def test_snapshots_materialization_proj_config(self, project):
             snapshots_dir = os.path.join(project.project_root, "snapshots")
             write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql")
     
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt()
     
     
     class TestInvalidSnapshotsMaterializationSchema(object):
         def test_snapshots_materialization_schema_config(self, project):
             snapshots_dir = os.path.join(project.project_root, "snapshots")
    -        write_file("version: 2\nsnapshots:\n  - name: mysnapshot\n    config:\n      materialized: table", snapshots_dir, "schema.yml")
    +        write_file(
    +            "version: 2\nsnapshots:\n  - name: mysnapshot\n    config:\n      materialized: table",
    +            snapshots_dir,
    +            "schema.yml",
    +        )
             write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql")
     
             with pytest.raises(ValidationError):
    diff --git a/tests/functional/configs/test_configs_in_schema_files.py b/tests/functional/configs/test_configs_in_schema_files.py
    index 0d702615474..a04b9ed43aa 100644
    --- a/tests/functional/configs/test_configs_in_schema_files.py
    +++ b/tests/functional/configs/test_configs_in_schema_files.py
    @@ -2,7 +2,7 @@
     
     from dbt.tests.util import run_dbt, get_manifest, check_relations_equal, write_file
     
    -from dbt.exceptions import CompilationException, ParsingException
    +from dbt.exceptions import CompilationError, ParsingError
     
     models_alt__schema_yml = """
     version: 2
    @@ -242,11 +242,11 @@ def test_config_layering(
             # copy a schema file with multiple metas
             #       shutil.copyfile('extra-alt/untagged.yml', 'models-alt/untagged.yml')
             write_file(extra_alt__untagged_yml, project.project_root, "models", "untagged.yml")
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt(["run"])
     
             # copy a schema file with config key in top-level of test and in config dict
             #       shutil.copyfile('extra-alt/untagged2.yml', 'models-alt/untagged.yml')
             write_file(extra_alt__untagged2_yml, project.project_root, "models", "untagged.yml")
    -        with pytest.raises(CompilationException):
    +        with pytest.raises(CompilationError):
                 run_dbt(["run"])
    diff --git a/tests/functional/configs/test_disabled_model.py b/tests/functional/configs/test_disabled_model.py
    index 5ca56512e14..4b6e74adffd 100644
    --- a/tests/functional/configs/test_disabled_model.py
    +++ b/tests/functional/configs/test_disabled_model.py
    @@ -2,7 +2,7 @@
     from hologram import ValidationError
     from dbt.tests.util import run_dbt, get_manifest
     
    -from dbt.exceptions import CompilationException, ParsingException
    +from dbt.exceptions import CompilationError, ParsingError
     
     from tests.functional.configs.fixtures import (
         schema_all_disabled_yml,
    @@ -47,7 +47,7 @@ def models(self):
             }
     
         def test_disabled_config(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["parse"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             expected_msg = "which is disabled"
    @@ -209,7 +209,7 @@ def models(self):
             }
     
         def test_disabled_config(self, project):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt(["parse"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             expected_msg = "Found 3 matching disabled nodes for model 'my_model_2'"
    diff --git a/tests/functional/configs/test_unused_configs.py b/tests/functional/configs/test_unused_configs.py
    index 7796472fea9..1bc887b03f1 100644
    --- a/tests/functional/configs/test_unused_configs.py
    +++ b/tests/functional/configs/test_unused_configs.py
    @@ -1,7 +1,7 @@
     import pytest
     
     from dbt.tests.util import run_dbt
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     
     seeds__seed_csv = """id,value
     4,2
    @@ -41,7 +41,7 @@ def test_warn_unused_configuration_paths(
             self,
             project,
         ):
    -        with pytest.raises(CompilationException) as excinfo:
    +        with pytest.raises(CompilationError) as excinfo:
                 run_dbt(["--warn-error", "seed"])
     
             assert "Configuration paths exist" in str(excinfo.value)
    diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py
    index e2f416d2fb4..562118f946f 100644
    --- a/tests/functional/context_methods/test_builtin_functions.py
    +++ b/tests/functional/context_methods/test_builtin_functions.py
    @@ -3,7 +3,7 @@
     import os
     
     from dbt.tests.util import run_dbt, run_dbt_and_capture, write_file
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     
     macros__validate_set_sql = """
     {% macro validate_set() %}
    @@ -112,8 +112,19 @@ def test_builtin_invocation_args_dict_function(self, project):
             expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': "
             assert expected in str(result)
     
    -        expected = "'send_anonymous_usage_stats': False, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'anonymous_usage_stats': True, 'indirect_selection': 'eager'}"
    -        assert expected in str(result)
    +        expected = (
    +            "'send_anonymous_usage_stats': False",
    +            "'quiet': False",
    +            "'no_print': False",
    +            "'cache_selected_only': False",
    +            "'macro': 'validate_invocation'",
    +            "'args': '{my_variable: test_variable}'",
    +            "'which': 'run-operation'",
    +            "'rpc_method': 'run-operation'",
    +            "'indirect_selection': 'eager'",
    +        )
    +        for element in expected:
    +            assert element in str(result)
     
         def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch):
             envs = {
    @@ -141,9 +152,9 @@ class TestContextBuiltinExceptions:
         # Assert compilation errors are raised with _strict equivalents
         def test_builtin_function_exception(self, project):
             write_file(models__set_exception_sql, project.project_root, "models", "raise.sql")
    -        with pytest.raises(CompilationException):
    +        with pytest.raises(CompilationError):
                 run_dbt(["compile"])
     
             write_file(models__zip_exception_sql, project.project_root, "models", "raise.sql")
    -        with pytest.raises(CompilationException):
    +        with pytest.raises(CompilationError):
                 run_dbt(["compile"])
    diff --git a/tests/functional/context_methods/test_cli_vars.py b/tests/functional/context_methods/test_cli_vars.py
    index 3e548b6f402..353d96d777b 100644
    --- a/tests/functional/context_methods/test_cli_vars.py
    +++ b/tests/functional/context_methods/test_cli_vars.py
    @@ -5,7 +5,7 @@
     
     from dbt.tests.util import run_dbt, get_artifact, write_config_file
     from dbt.tests.fixtures.project import write_project_files
    -from dbt.exceptions import RuntimeException, CompilationException
    +from dbt.exceptions import DbtRuntimeError, CompilationError
     
     
     models_complex__schema_yml = """
    @@ -114,7 +114,7 @@ def test_cli_vars_in_profile(self, project, dbt_profile_data):
             profile = dbt_profile_data
             profile["test"]["outputs"]["default"]["host"] = "{{ var('db_host') }}"
             write_config_file(profile, project.profiles_dir, "profiles.yml")
    -        with pytest.raises(RuntimeException):
    +        with pytest.raises(DbtRuntimeError):
                 results = run_dbt(["run"])
             results = run_dbt(["run", "--vars", "db_host: localhost"])
             assert len(results) == 1
    @@ -148,7 +148,7 @@ def test_cli_vars_in_packages(self, project, packages_config):
             write_config_file(packages, project.project_root, "packages.yml")
     
             # Without vars args deps fails
    -        with pytest.raises(RuntimeException):
    +        with pytest.raises(DbtRuntimeError):
                 run_dbt(["deps"])
     
             # With vars arg deps succeeds
    @@ -200,7 +200,7 @@ def test_vars_in_selectors(self, project):
     
             # Update the selectors.yml file to have a var
             write_config_file(var_selectors_yml, project.project_root, "selectors.yml")
    -        with pytest.raises(CompilationException):
    +        with pytest.raises(CompilationError):
                 run_dbt(["run"])
     
             # Var in cli_vars works
    diff --git a/tests/functional/context_methods/test_custom_env_vars.py b/tests/functional/context_methods/test_custom_env_vars.py
    index 413789c7676..e74a5dcee09 100644
    --- a/tests/functional/context_methods/test_custom_env_vars.py
    +++ b/tests/functional/context_methods/test_custom_env_vars.py
    @@ -27,7 +27,9 @@ def setup(self):
             del os.environ["DBT_ENV_CUSTOM_ENV_SOME_VAR"]
     
         def test_extra_filled(self, project):
    -        _, log_output = run_dbt_and_capture(['--log-format=json', 'deps'],)
    +        _, log_output = run_dbt_and_capture(
    +            ["--log-format=json", "deps"],
    +        )
             logs = parse_json_logs(log_output)
             for log in logs:
    -            assert log['info'].get('extra') == {"SOME_VAR": "value"}
    +            assert log["info"].get("extra") == {"SOME_VAR": "value"}
    diff --git a/tests/functional/context_methods/test_secret_env_vars.py b/tests/functional/context_methods/test_secret_env_vars.py
    index 9cd4c2eacac..710c104f551 100644
    --- a/tests/functional/context_methods/test_secret_env_vars.py
    +++ b/tests/functional/context_methods/test_secret_env_vars.py
    @@ -2,7 +2,7 @@
     import os
     
     from dbt.constants import SECRET_ENV_PREFIX
    -from dbt.exceptions import ParsingException, InternalException
    +from dbt.exceptions import ParsingError, DbtInternalError
     from tests.functional.context_methods.first_dependency import FirstDependencyProject
     from dbt.tests.util import run_dbt, run_dbt_and_capture
     
    @@ -30,7 +30,7 @@ def models(self):
             return {"context.sql": secret_bad__context_sql}
     
         def test_disallow_secret(self, project):
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt(["compile"])
     
     
    @@ -130,7 +130,7 @@ def packages(self):
             }
     
         def test_fail_clone_with_scrubbing(self, project):
    -        with pytest.raises(InternalException) as excinfo:
    +        with pytest.raises(DbtInternalError) as excinfo:
                 _, log_output = run_dbt_and_capture(["deps"])
     
             assert "abc123" not in str(excinfo.value)
    @@ -149,7 +149,7 @@ def packages(self):
             }
     
         def test_fail_clone_with_scrubbing(self, project):
    -        with pytest.raises(InternalException) as excinfo:
    +        with pytest.raises(DbtInternalError) as excinfo:
                 _, log_output = run_dbt_and_capture(["deps"])
     
             # we should not see any manipulated form of the secret value (abc123) here
    diff --git a/tests/functional/context_methods/test_var_in_generate_name.py b/tests/functional/context_methods/test_var_in_generate_name.py
    index 5025cb8fede..2bbba457e58 100644
    --- a/tests/functional/context_methods/test_var_in_generate_name.py
    +++ b/tests/functional/context_methods/test_var_in_generate_name.py
    @@ -1,7 +1,7 @@
     import pytest
     
     from dbt.tests.util import run_dbt, update_config_file
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     
     model_sql = """
     select 1 as id
    @@ -27,7 +27,7 @@ def models(self):
     
         def test_generate_schema_name_var(self, project):
             # var isn't set, so generate_name macro fails
    -        with pytest.raises(CompilationException) as excinfo:
    +        with pytest.raises(CompilationError) as excinfo:
                 run_dbt(["compile"])
     
             assert "Required var 'somevar' not found in config" in str(excinfo.value)
    diff --git a/tests/functional/custom_aliases/fixtures.py b/tests/functional/custom_aliases/fixtures.py
    new file mode 100644
    index 00000000000..6324e1249e4
    --- /dev/null
    +++ b/tests/functional/custom_aliases/fixtures.py
    @@ -0,0 +1,68 @@
    +model1_sql = """
    +{{ config(materialized='table', alias='alias') }}
    +
    +select {{ string_literal(this.name) }} as model_name
    +"""
    +
    +model2_sql = """
    +{{ config(materialized='table') }}
    +
    +select {{ string_literal(this.name) }} as model_name
    +"""
    +
    +macros_sql = """
    +{% macro generate_alias_name(custom_alias_name, node) -%}
    +    {%- if custom_alias_name is none -%}
    +        {{ node.name }}
    +    {%- else -%}
    +        custom_{{ custom_alias_name | trim }}
    +    {%- endif -%}
    +{%- endmacro %}
    +
    +
    +{% macro string_literal(s) -%}
    +  {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }}
    +{%- endmacro %}
    +
    +{% macro default__string_literal(s) %}
    +    '{{ s }}'::text
    +{% endmacro %}
    +"""
    +
    +macros_config_sql = """
    +{#-- Verify that the config['alias'] key is present #}
    +{% macro generate_alias_name(custom_alias_name, node) -%}
    +    {%- if custom_alias_name is none -%}
    +        {{ node.name }}
    +    {%- else -%}
    +        custom_{{ node.config['alias'] if 'alias' in node.config else '' | trim }}
    +    {%- endif -%}
    +{%- endmacro %}
    +
    +{% macro string_literal(s) -%}
    +  {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }}
    +{%- endmacro %}
    +
    +{% macro default__string_literal(s) %}
    +    '{{ s }}'::text
    +{% endmacro %}
    +"""
    +
    +schema_yml = """
    +version: 2
    +
    +models:
    +  - name: model1
    +    columns:
    +      - name: model_name
    +        tests:
    +          - accepted_values:
    +             values: ['custom_alias']
    +  - name: model2
    +    columns:
    +      - name: model_name
    +        tests:
    +          - accepted_values:
    +             values: ['model2']
    +
    +"""
    diff --git a/tests/functional/custom_aliases/test_custom_aliases.py b/tests/functional/custom_aliases/test_custom_aliases.py
    new file mode 100644
    index 00000000000..86b44c3b3f0
    --- /dev/null
    +++ b/tests/functional/custom_aliases/test_custom_aliases.py
    @@ -0,0 +1,49 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +
    +from tests.functional.custom_aliases.fixtures import (
    +    model1_sql,
    +    model2_sql,
    +    macros_sql,
    +    macros_config_sql,
    +    schema_yml,
    +)
    +
    +
    +class TestAliases:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model1.sql": model1_sql, "model2.sql": model2_sql, "schema.yml": schema_yml}
    +
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "macros.sql": macros_sql,
    +        }
    +
+    def test_custom_alias_name(self, project):
    +        results = run_dbt(["run"])
    +        assert len(results) == 2
    +
    +        results = run_dbt(["test"])
    +        assert len(results) == 2
    +
    +
    +class TestAliasesWithConfig:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model1.sql": model1_sql, "model2.sql": model2_sql, "schema.yml": schema_yml}
    +
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "macros.sql": macros_config_sql,
    +        }
    +
+    def test_custom_alias_name(self, project):
    +        results = run_dbt(["run"])
    +        assert len(results) == 2
    +
    +        results = run_dbt(["test"])
    +        assert len(results) == 2
    diff --git a/tests/functional/custom_singular_tests/test_custom_singular_tests.py b/tests/functional/custom_singular_tests/test_custom_singular_tests.py
    index 9a8df339374..aec0586b873 100644
    --- a/tests/functional/custom_singular_tests/test_custom_singular_tests.py
    +++ b/tests/functional/custom_singular_tests/test_custom_singular_tests.py
    @@ -103,3 +103,8 @@ def test_data_tests(self, project, tests):
                 assert result.status == "fail"
                 assert not result.skipped
                 assert result.failures > 0
    +            assert result.adapter_response == {
    +                "_message": "SELECT 1",
    +                "code": "SELECT",
    +                "rows_affected": 1,
    +            }
    diff --git a/tests/functional/cycles/test_cycles.py b/tests/functional/cycles/test_cycles.py
    index 0e2cdcaf911..6d2eb3fd0cc 100644
    --- a/tests/functional/cycles/test_cycles.py
    +++ b/tests/functional/cycles/test_cycles.py
    @@ -36,10 +36,7 @@
     class TestSimpleCycle:
         @pytest.fixture(scope="class")
         def models(self):
    -        return {
    -            "model_a.sql": model_a_sql,
    -            "model_b.sql": model_b_sql
    -        }
    +        return {"model_a.sql": model_a_sql, "model_b.sql": model_b_sql}
     
         def test_simple_cycle(self, project):
             with pytest.raises(RuntimeError) as exc:
    diff --git a/tests/functional/defer_state/fixtures.py b/tests/functional/defer_state/fixtures.py
    new file mode 100644
    index 00000000000..17f46f842d9
    --- /dev/null
    +++ b/tests/functional/defer_state/fixtures.py
    @@ -0,0 +1,101 @@
    +seed_csv = """id,name
    +1,Alice
    +2,Bob
    +"""
    +
    +table_model_sql = """
    +{{ config(materialized='table') }}
    +select * from {{ ref('ephemeral_model') }}
    +
    +-- establish a macro dependency to trigger state:modified.macros
    +-- depends on: {{ my_macro() }}
    +"""
    +
    +changed_table_model_sql = """
    +{{ config(materialized='table') }}
    +select 1 as fun
    +"""
    +
    +view_model_sql = """
    +select * from {{ ref('seed') }}
    +
    +-- establish a macro dependency that trips infinite recursion if not handled
    +-- depends on: {{ my_infinitely_recursive_macro() }}
    +"""
    +
    +changed_view_model_sql = """
    +select * from no.such.table
    +"""
    +
    +ephemeral_model_sql = """
    +{{ config(materialized='ephemeral') }}
    +select * from {{ ref('view_model') }}
    +"""
    +
    +changed_ephemeral_model_sql = """
    +{{ config(materialized='ephemeral') }}
    +select * from no.such.table
    +"""
    +
    +schema_yml = """
    +version: 2
    +models:
    +  - name: view_model
    +    columns:
    +      - name: id
    +        tests:
    +          - unique:
    +              severity: error
    +          - not_null
    +      - name: name
    +"""
    +
    +exposures_yml = """
    +version: 2
    +exposures:
    +  - name: my_exposure
    +    type: application
    +    depends_on:
    +      - ref('view_model')
    +    owner:
    +      email: test@example.com
    +"""
    +
    +macros_sql = """
    +{% macro my_macro() %}
    +    {% do log('in a macro' ) %}
    +{% endmacro %}
    +"""
    +
    +infinite_macros_sql = """
    +{# trigger infinite recursion if not handled #}
    +
    +{% macro my_infinitely_recursive_macro() %}
    +  {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }}
    +{% endmacro %}
    +
    +{% macro default__my_infinitely_recursive_macro() %}
    +    {% if unmet_condition %}
    +        {{ my_infinitely_recursive_macro() }}
    +    {% else %}
    +        {{ return('') }}
    +    {% endif %}
    +{% endmacro %}
    +"""
    +
    +snapshot_sql = """
    +{% snapshot my_cool_snapshot %}
    +
    +    {{
    +        config(
    +            target_database=database,
    +            target_schema=schema,
    +            unique_key='id',
    +            strategy='check',
    +            check_cols=['id'],
    +        )
    +    }}
    +    select * from {{ ref('view_model') }}
    +
    +{% endsnapshot %}
    +"""
    diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py
    new file mode 100644
    index 00000000000..134cae1c626
    --- /dev/null
    +++ b/tests/functional/defer_state/test_defer_state.py
    @@ -0,0 +1,273 @@
    +import json
    +import os
    +import shutil
    +from copy import deepcopy
    +
    +import pytest
    +
    +from dbt.tests.util import run_dbt, write_file, rm_file
    +
    +from dbt.exceptions import DbtRuntimeError
    +
    +from tests.functional.defer_state.fixtures import (
    +    seed_csv,
    +    table_model_sql,
    +    changed_table_model_sql,
    +    view_model_sql,
    +    changed_view_model_sql,
    +    ephemeral_model_sql,
    +    changed_ephemeral_model_sql,
    +    schema_yml,
    +    exposures_yml,
    +    macros_sql,
    +    infinite_macros_sql,
    +    snapshot_sql,
    +)
    +
    +
    +class BaseDeferState:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "table_model.sql": table_model_sql,
    +            "view_model.sql": view_model_sql,
    +            "ephemeral_model.sql": ephemeral_model_sql,
    +            "schema.yml": schema_yml,
    +            "exposures.yml": exposures_yml,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "macros.sql": macros_sql,
    +            "infinite_macros.sql": infinite_macros_sql,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {
    +            "seed.csv": seed_csv,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def snapshots(self):
    +        return {
    +            "snapshot.sql": snapshot_sql,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def other_schema(self, unique_schema):
    +        return unique_schema + "_other"
    +
    +    @property
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "test": {
    +                    "quote_columns": False,
    +                }
    +            }
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def profiles_config_update(self, dbt_profile_target, unique_schema, other_schema):
    +        outputs = {"default": dbt_profile_target, "otherschema": deepcopy(dbt_profile_target)}
    +        outputs["default"]["schema"] = unique_schema
    +        outputs["otherschema"]["schema"] = other_schema
    +        return {"test": {"outputs": outputs, "target": "default"}}
    +
    +    def copy_state(self):
    +        if not os.path.exists("state"):
    +            os.makedirs("state")
    +        shutil.copyfile("target/manifest.json", "state/manifest.json")
    +
    +    def run_and_save_state(self):
    +        results = run_dbt(["seed"])
    +        assert len(results) == 1
    +        assert not any(r.node.deferred for r in results)
    +        results = run_dbt(["run"])
    +        assert len(results) == 2
    +        assert not any(r.node.deferred for r in results)
    +        results = run_dbt(["test"])
    +        assert len(results) == 2
    +
    +        # copy files
    +        self.copy_state()
    +
    +
    +class TestDeferStateUnsupportedCommands(BaseDeferState):
    +    def test_unsupported_commands(self, project):
+        # make sure these commands don't work with --defer
    +        with pytest.raises(SystemExit):
    +            run_dbt(["seed", "--defer"])
    +
    +    def test_no_state(self, project):
    +        # no "state" files present, snapshot fails
    +        with pytest.raises(DbtRuntimeError):
    +            run_dbt(["snapshot", "--state", "state", "--defer"])
    +
    +
    +class TestRunCompileState(BaseDeferState):
    +    def test_run_and_compile_defer(self, project):
    +        self.run_and_save_state()
    +
    +        # defer test, it succeeds
    +        results = run_dbt(["compile", "--state", "state", "--defer"])
    +        assert len(results.results) == 6
    +        assert results.results[0].node.name == "seed"
    +
    +
    +class TestSnapshotState(BaseDeferState):
    +    def test_snapshot_state_defer(self, project):
    +        self.run_and_save_state()
    +        # snapshot succeeds without --defer
    +        run_dbt(["snapshot"])
    +        # copy files
    +        self.copy_state()
    +        # defer test, it succeeds
    +        run_dbt(["snapshot", "--state", "state", "--defer"])
    +        # favor_state test, it succeeds
    +        run_dbt(["snapshot", "--state", "state", "--defer", "--favor-state"])
    +
    +
    +class TestRunDeferState(BaseDeferState):
    +    def test_run_and_defer(self, project, unique_schema, other_schema):
    +        project.create_test_schema(other_schema)
    +        self.run_and_save_state()
    +
    +        # test tests first, because run will change things
    +        # no state, wrong schema, failure.
    +        run_dbt(["test", "--target", "otherschema"], expect_pass=False)
    +
    +        # test generate docs
    +        # no state, wrong schema, empty nodes
    +        catalog = run_dbt(["docs", "generate", "--target", "otherschema"])
    +        assert not catalog.nodes
    +
    +        # no state, run also fails
    +        run_dbt(["run", "--target", "otherschema"], expect_pass=False)
    +
    +        # defer test, it succeeds
    +        results = run_dbt(
    +            ["test", "-m", "view_model+", "--state", "state", "--defer", "--target", "otherschema"]
    +        )
    +
+        # defer docs generate with state: the catalog still refers to the schema from the earlier healthy run
    +        catalog = run_dbt(
    +            [
    +                "docs",
    +                "generate",
    +                "-m",
    +                "view_model+",
    +                "--state",
    +                "state",
    +                "--defer",
    +                "--target",
    +                "otherschema",
    +            ]
    +        )
    +        assert other_schema not in catalog.nodes["seed.test.seed"].metadata.schema
    +        assert unique_schema in catalog.nodes["seed.test.seed"].metadata.schema
    +
    +        # with state it should work though
    +        results = run_dbt(
    +            ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"]
    +        )
    +        assert other_schema not in results[0].node.compiled_code
    +        assert unique_schema in results[0].node.compiled_code
    +
    +        with open("target/manifest.json") as fp:
    +            data = json.load(fp)
    +        assert data["nodes"]["seed.test.seed"]["deferred"]
    +
    +        assert len(results) == 1
    +
    +
    +class TestRunDeferStateChangedModel(BaseDeferState):
    +    def test_run_defer_state_changed_model(self, project):
    +        self.run_and_save_state()
    +
    +        # change "view_model"
    +        write_file(changed_view_model_sql, "models", "view_model.sql")
    +
    +        # the sql here is just wrong, so it should fail
    +        run_dbt(
    +            ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"],
    +            expect_pass=False,
    +        )
    +        # but this should work since we just use the old happy model
    +        run_dbt(
    +            ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"],
    +            expect_pass=True,
    +        )
    +
    +        # change "ephemeral_model"
    +        write_file(changed_ephemeral_model_sql, "models", "ephemeral_model.sql")
    +        # this should fail because the table model refs a broken ephemeral
    +        # model, which it should see
    +        run_dbt(
    +            ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"],
    +            expect_pass=False,
    +        )
    +
    +
    +class TestRunDeferStateIFFNotExists(BaseDeferState):
    +    def test_run_defer_iff_not_exists(self, project, unique_schema, other_schema):
    +        project.create_test_schema(other_schema)
    +        self.run_and_save_state()
    +
    +        results = run_dbt(["seed", "--target", "otherschema"])
    +        assert len(results) == 1
    +        results = run_dbt(["run", "--state", "state", "--defer", "--target", "otherschema"])
    +        assert len(results) == 2
    +
    +        # because the seed now exists in our "other" schema, we should prefer it over the one
    +        # available from state
    +        assert other_schema in results[0].node.compiled_code
    +
    +        # this time with --favor-state: even though the seed now exists in our "other" schema,
    +        # we should still favor the one available from state
    +        results = run_dbt(
    +            ["run", "--state", "state", "--defer", "--favor-state", "--target", "otherschema"]
    +        )
    +        assert len(results) == 2
    +        assert other_schema not in results[0].node.compiled_code
    +
    +
    +class TestDeferStateDeletedUpstream(BaseDeferState):
    +    def test_run_defer_deleted_upstream(self, project, unique_schema, other_schema):
    +        project.create_test_schema(other_schema)
    +        self.run_and_save_state()
    +
    +        # remove "ephemeral_model" + change "table_model"
    +        rm_file("models", "ephemeral_model.sql")
    +        write_file(changed_table_model_sql, "models", "table_model.sql")
    +
+        # ephemeral_model is now gone. Previously this caused a
+        # KeyError (dbt#2875); now it should pass
    +        run_dbt(
    +            ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"],
    +            expect_pass=True,
    +        )
    +
    +        # despite deferral, we should use models just created in our schema
    +        results = run_dbt(["test", "--state", "state", "--defer", "--target", "otherschema"])
    +        assert other_schema in results[0].node.compiled_code
    +
    +        # this time with --favor-state: prefer the models in the "other" schema, even though they exist in ours
    +        run_dbt(
    +            [
    +                "run",
    +                "-m",
    +                "view_model",
    +                "--state",
    +                "state",
    +                "--defer",
    +                "--favor-state",
    +                "--target",
    +                "otherschema",
    +            ],
    +            expect_pass=True,
    +        )
    +        results = run_dbt(["test", "--state", "state", "--defer", "--favor-state"])
    +        assert other_schema not in results[0].node.compiled_code
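TestRunDeferStateIFFNotExists above encodes the resolution rule for deferred refs: plain --defer prefers a relation that already exists in the current target, while --defer --favor-state resolves against the saved state regardless. A plain-Python sketch of that rule (hypothetical helper, not dbt internals):

def resolve_schema(exists_in_target, target_schema, state_schema, favor_state=False):
    # --favor-state always trusts the state manifest.
    if favor_state:
        return state_schema
    # plain --defer only falls back to state when the target lacks the relation
    return target_schema if exists_in_target else state_schema

assert resolve_schema(True, "other", "original") == "other"
assert resolve_schema(True, "other", "original", favor_state=True) == "original"
assert resolve_schema(False, "other", "original") == "original"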
    diff --git a/tests/functional/defer_state/test_modified_state.py b/tests/functional/defer_state/test_modified_state.py
    new file mode 100644
    index 00000000000..80e3d455da1
    --- /dev/null
    +++ b/tests/functional/defer_state/test_modified_state.py
    @@ -0,0 +1,263 @@
    +import os
    +import random
    +import shutil
    +import string
    +
    +import pytest
    +
    +from dbt.tests.util import run_dbt, update_config_file, write_file
    +
    +from dbt.exceptions import CompilationError
    +
    +from tests.functional.defer_state.fixtures import (
    +    seed_csv,
    +    table_model_sql,
    +    view_model_sql,
    +    ephemeral_model_sql,
    +    schema_yml,
    +    exposures_yml,
    +    macros_sql,
    +    infinite_macros_sql,
    +)
    +
    +
    +class BaseModifiedState:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "table_model.sql": table_model_sql,
    +            "view_model.sql": view_model_sql,
    +            "ephemeral_model.sql": ephemeral_model_sql,
    +            "schema.yml": schema_yml,
    +            "exposures.yml": exposures_yml,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "macros.sql": macros_sql,
    +            "infinite_macros.sql": infinite_macros_sql,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {
    +            "seed.csv": seed_csv,
    +        }
    +
    +    @property
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "test": {
    +                    "quote_columns": False,
    +                }
    +            }
    +        }
    +
    +    def copy_state(self):
    +        if not os.path.exists("state"):
    +            os.makedirs("state")
    +        shutil.copyfile("target/manifest.json", "state/manifest.json")
    +
    +    def run_and_save_state(self):
    +        run_dbt(["seed"])
    +        run_dbt(["run"])
    +        self.copy_state()
    +
    +
    +class TestChangedSeedContents(BaseModifiedState):
    +    def test_changed_seed_contents_state(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"],
    +            expect_pass=True,
    +        )
    +        assert len(results) == 0
    +
    +        # add a new row to the seed
    +        changed_seed_contents = seed_csv + "\n" + "3,carl"
    +        write_file(changed_seed_contents, "seeds", "seed.csv")
    +
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"]
    +        )
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        results = run_dbt(["ls", "--select", "state:modified", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        results = run_dbt(["ls", "--select", "state:modified+", "--state", "./state"])
    +        assert len(results) == 7
    +        assert set(results) == {
    +            "test.seed",
    +            "test.table_model",
    +            "test.view_model",
    +            "test.ephemeral_model",
    +            "test.not_null_view_model_id",
    +            "test.unique_view_model_id",
    +            "exposure:test.my_exposure",
    +        }
    +
    +        shutil.rmtree("./state")
    +        self.copy_state()
    +
    +        # make a very big seed
+        # assume each line is ~64 bytes: a short id, a comma, a 62-char value, and a newline
    +        target_size = 1 * 1024 * 1024
    +        line_size = 64
    +        num_lines = target_size // line_size
    +        maxlines = num_lines + 4
    +        seed_lines = [seed_csv]
    +        for idx in range(4, maxlines):
    +            value = "".join(random.choices(string.ascii_letters, k=62))
    +            seed_lines.append(f"{idx},{value}")
    +        seed_contents = "\n".join(seed_lines)
    +        write_file(seed_contents, "seeds", "seed.csv")
    +
    +        # now if we run again, we should get a warning
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"]
    +        )
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        with pytest.raises(CompilationError) as exc:
    +            run_dbt(
    +                [
    +                    "--warn-error",
    +                    "ls",
    +                    "--resource-type",
    +                    "seed",
    +                    "--select",
    +                    "state:modified",
    +                    "--state",
    +                    "./state",
    +                ]
    +            )
    +        assert ">1MB" in str(exc.value)
    +
    +        shutil.rmtree("./state")
    +        self.copy_state()
    +
    +        # once it"s in path mode, we don"t mark it as modified if it changes
    +        write_file(seed_contents + "\n1,test", "seeds", "seed.csv")
    +
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"],
    +            expect_pass=True,
    +        )
    +        assert len(results) == 0
    +
    +
    +class TestChangedSeedConfig(BaseModifiedState):
    +    def test_changed_seed_config(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"],
    +            expect_pass=True,
    +        )
    +        assert len(results) == 0
    +
    +        update_config_file({"seeds": {"test": {"quote_columns": False}}}, "dbt_project.yml")
    +
    +        # quoting change -> seed changed
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"]
    +        )
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +
    +class TestUnrenderedConfigSame(BaseModifiedState):
    +    def test_unrendered_config_same(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(
    +            ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"],
    +            expect_pass=True,
    +        )
    +        assert len(results) == 0
    +
    +        # although this is the default value, dbt will recognize it as a change
+        # for previously-unconfigured models, because it's been explicitly set
    +        update_config_file({"models": {"test": {"materialized": "view"}}}, "dbt_project.yml")
    +        results = run_dbt(
    +            ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"]
    +        )
    +        assert len(results) == 1
    +        assert results[0] == "test.view_model"
    +
    +
    +class TestChangedModelContents(BaseModifiedState):
    +    def test_changed_model_contents(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
    +        assert len(results) == 0
    +
    +        table_model_update = """
    +        {{ config(materialized="table") }}
    +
    +        select * from {{ ref("seed") }}
    +        """
    +
    +        write_file(table_model_update, "models", "table_model.sql")
    +
    +        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0].node.name == "table_model"
    +
    +
    +class TestNewMacro(BaseModifiedState):
    +    def test_new_macro(self, project):
    +        self.run_and_save_state()
    +
    +        new_macro = """
    +            {% macro my_other_macro() %}
    +            {% endmacro %}
    +        """
    +
    +        # add a new macro to a new file
    +        write_file(new_macro, "macros", "second_macro.sql")
    +
    +        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
    +        assert len(results) == 0
    +
    +        os.remove("macros/second_macro.sql")
    +        # add a new macro to the existing file
    +        with open("macros/macros.sql", "a") as fp:
    +            fp.write(new_macro)
    +
    +        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
    +        assert len(results) == 0
    +
    +
    +class TestChangedMacroContents(BaseModifiedState):
    +    def test_changed_macro_contents(self, project):
    +        self.run_and_save_state()
    +
    +        # modify an existing macro
    +        updated_macro = """
    +        {% macro my_macro() %}
    +            {% do log("in a macro", info=True) %}
    +        {% endmacro %}
    +        """
    +        write_file(updated_macro, "macros", "macros.sql")
    +
    +        # table_model calls this macro
    +        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
    +        assert len(results) == 1
    +
    +
    +class TestChangedExposure(BaseModifiedState):
    +    def test_changed_exposure(self, project):
    +        self.run_and_save_state()
    +
    +        # add an "owner.name" to existing exposure
    +        updated_exposure = exposures_yml + "\n      name: John Doe\n"
    +        write_file(updated_exposure, "models", "exposures.yml")
    +
    +        results = run_dbt(["run", "--models", "+state:modified", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0].node.name == "view_model"
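TestChangedSeedContents above relies on a size cutoff for seed fingerprinting: small seeds are hashed by content, while seeds over the warned ">1MB" limit fall back to path-only tracking, so further content edits stop registering as state:modified. A sketch of that behavior (the hash choice and exact threshold here are assumptions for illustration, not dbt's implementation):

import hashlib
import os

def seed_fingerprint(path, threshold=1024 * 1024):
    # Over the cutoff, only the path is tracked; content edits go unnoticed.
    if os.path.getsize(path) > threshold:
        return path
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()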
    diff --git a/tests/functional/defer_state/test_run_results_state.py b/tests/functional/defer_state/test_run_results_state.py
    new file mode 100644
    index 00000000000..aa1dc549272
    --- /dev/null
    +++ b/tests/functional/defer_state/test_run_results_state.py
    @@ -0,0 +1,494 @@
    +import os
    +import shutil
    +
    +import pytest
    +
    +from dbt.tests.util import run_dbt, write_file
    +
    +from tests.functional.defer_state.fixtures import (
    +    seed_csv,
    +    table_model_sql,
    +    view_model_sql,
    +    ephemeral_model_sql,
    +    schema_yml,
    +    exposures_yml,
    +    macros_sql,
    +    infinite_macros_sql,
    +)
    +
    +
    +class BaseRunResultsState:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "table_model.sql": table_model_sql,
    +            "view_model.sql": view_model_sql,
    +            "ephemeral_model.sql": ephemeral_model_sql,
    +            "schema.yml": schema_yml,
    +            "exposures.yml": exposures_yml,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "macros.sql": macros_sql,
    +            "infinite_macros.sql": infinite_macros_sql,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {
    +            "seed.csv": seed_csv,
    +        }
    +
    +    @property
    +    def project_config_update(self):
    +        return {
    +            "seeds": {
    +                "test": {
    +                    "quote_columns": False,
    +                }
    +            }
    +        }
    +
    +    def clear_state(self):
    +        shutil.rmtree("./state")
    +
    +    def copy_state(self):
    +        if not os.path.exists("state"):
    +            os.makedirs("state")
    +        shutil.copyfile("target/manifest.json", "state/manifest.json")
    +        shutil.copyfile("target/run_results.json", "state/run_results.json")
    +
    +    def run_and_save_state(self):
    +        run_dbt(["build"])
    +        self.copy_state()
    +
    +    def rebuild_run_dbt(self, expect_pass=True):
    +        self.clear_state()
    +        run_dbt(["build"], expect_pass=expect_pass)
    +        self.copy_state()
    +
    +    def update_view_model_bad_sql(self):
    +        # update view model to generate a failure case
    +        not_unique_sql = "select * from forced_error"
    +        write_file(not_unique_sql, "models", "view_model.sql")
    +
    +    def update_view_model_failing_tests(self, with_dupes=True, with_nulls=False):
    +        # test failure on build tests
    +        # fail the unique test
    +        select_1 = "select 1 as id"
    +        select_stmts = [select_1]
    +        if with_dupes:
    +            select_stmts.append(select_1)
    +        if with_nulls:
    +            select_stmts.append("select null as id")
    +        failing_tests_sql = " union all ".join(select_stmts)
    +        write_file(failing_tests_sql, "models", "view_model.sql")
    +
    +    def update_unique_test_severity_warn(self):
    +        # change the unique test severity from error to warn and reuse the same view_model.sql changes above
    +        new_config = schema_yml.replace("error", "warn")
    +        write_file(new_config, "models", "schema.yml")
    +
    +
    +class TestSeedRunResultsState(BaseRunResultsState):
    +    def test_seed_run_results_state(self, project):
    +        self.run_and_save_state()
    +        self.clear_state()
    +        run_dbt(["seed"])
    +        self.copy_state()
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "result:success", "--state", "./state"],
    +            expect_pass=True,
    +        )
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        results = run_dbt(["ls", "--select", "result:success", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        results = run_dbt(["ls", "--select", "result:success+", "--state", "./state"])
    +        assert len(results) == 7
    +        assert set(results) == {
    +            "test.seed",
    +            "test.table_model",
    +            "test.view_model",
    +            "test.ephemeral_model",
    +            "test.not_null_view_model_id",
    +            "test.unique_view_model_id",
    +            "exposure:test.my_exposure",
    +        }
    +
    +        # add a new faulty row to the seed
    +        changed_seed_contents = seed_csv + "\n" + "\\\3,carl"
    +        write_file(changed_seed_contents, "seeds", "seed.csv")
    +
    +        self.clear_state()
    +        run_dbt(["seed"], expect_pass=False)
    +        self.copy_state()
    +
    +        results = run_dbt(
    +            ["ls", "--resource-type", "seed", "--select", "result:error", "--state", "./state"],
    +            expect_pass=True,
    +        )
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        results = run_dbt(["ls", "--select", "result:error", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0] == "test.seed"
    +
    +        results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"])
    +        assert len(results) == 7
    +        assert set(results) == {
    +            "test.seed",
    +            "test.table_model",
    +            "test.view_model",
    +            "test.ephemeral_model",
    +            "test.not_null_view_model_id",
    +            "test.unique_view_model_id",
    +            "exposure:test.my_exposure",
    +        }
    +
    +
    +class TestBuildRunResultsState(BaseRunResultsState):
    +    def test_build_run_results_state(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(["build", "--select", "result:error", "--state", "./state"])
    +        assert len(results) == 0
    +
    +        self.update_view_model_bad_sql()
    +        self.rebuild_run_dbt(expect_pass=False)
    +
    +        results = run_dbt(
    +            ["build", "--select", "result:error", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 3
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"view_model", "not_null_view_model_id", "unique_view_model_id"}
    +
    +        results = run_dbt(["ls", "--select", "result:error", "--state", "./state"])
    +        assert len(results) == 3
    +        assert set(results) == {
    +            "test.view_model",
    +            "test.not_null_view_model_id",
    +            "test.unique_view_model_id",
    +        }
    +
    +        results = run_dbt(
    +            ["build", "--select", "result:error+", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 4
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {
    +            "table_model",
    +            "view_model",
    +            "not_null_view_model_id",
    +            "unique_view_model_id",
    +        }
    +
    +        results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"])
    +        assert len(results) == 6  # includes exposure
    +        assert set(results) == {
    +            "test.table_model",
    +            "test.view_model",
    +            "test.ephemeral_model",
    +            "test.not_null_view_model_id",
    +            "test.unique_view_model_id",
    +            "exposure:test.my_exposure",
    +        }
    +
    +        self.update_view_model_failing_tests()
    +        self.rebuild_run_dbt(expect_pass=False)
    +
    +        results = run_dbt(
    +            ["build", "--select", "result:fail", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "unique_view_model_id"
    +
    +        results = run_dbt(["ls", "--select", "result:fail", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0] == "test.unique_view_model_id"
    +
    +        results = run_dbt(
    +            ["build", "--select", "result:fail+", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 2
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"table_model", "unique_view_model_id"}
    +
    +        results = run_dbt(["ls", "--select", "result:fail+", "--state", "./state"])
    +        assert len(results) == 1
    +        assert set(results) == {"test.unique_view_model_id"}
    +
    +        self.update_unique_test_severity_warn()
    +        self.rebuild_run_dbt(expect_pass=True)
    +
    +        results = run_dbt(
    +            ["build", "--select", "result:warn", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "unique_view_model_id"
    +
    +        results = run_dbt(["ls", "--select", "result:warn", "--state", "./state"])
    +        assert len(results) == 1
    +        assert results[0] == "test.unique_view_model_id"
    +
    +        results = run_dbt(
    +            ["build", "--select", "result:warn+", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 2  # includes table_model to be run
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"table_model", "unique_view_model_id"}
    +
    +        results = run_dbt(["ls", "--select", "result:warn+", "--state", "./state"])
    +        assert len(results) == 1
    +        assert set(results) == {"test.unique_view_model_id"}
    +
    +
    +class TestRunRunResultsState(BaseRunResultsState):
    +    def test_run_run_results_state(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(
    +            ["run", "--select", "result:success", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 2
    +        assert results[0].node.name == "view_model"
    +        assert results[1].node.name == "table_model"
    +
    +        # clear state and rerun upstream view model to test + operator
    +        self.clear_state()
    +        run_dbt(["run", "--select", "view_model"], expect_pass=True)
    +        self.copy_state()
    +        results = run_dbt(
    +            ["run", "--select", "result:success+", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 2
    +        assert results[0].node.name == "view_model"
    +        assert results[1].node.name == "table_model"
    +
    +        # check we are starting from a place with 0 errors
    +        results = run_dbt(["run", "--select", "result:error", "--state", "./state"])
    +        assert len(results) == 0
    +
    +        self.update_view_model_bad_sql()
    +        self.clear_state()
    +        run_dbt(["run"], expect_pass=False)
    +        self.copy_state()
    +
    +        # test single result selector on error
    +        results = run_dbt(
    +            ["run", "--select", "result:error", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "view_model"
    +
    +        # test + operator selection on error
    +        results = run_dbt(
    +            ["run", "--select", "result:error+", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 2
    +        assert results[0].node.name == "view_model"
    +        assert results[1].node.name == "table_model"
    +
+        # single result selector on skipped. Expect this to pass because the underlying view is already defined above
    +        results = run_dbt(
    +            ["run", "--select", "result:skipped", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "table_model"
    +
    +        # add a downstream model that depends on table_model for skipped+ selector
    +        downstream_model_sql = "select * from {{ref('table_model')}}"
    +        write_file(downstream_model_sql, "models", "table_model_downstream.sql")
    +
    +        self.clear_state()
    +        run_dbt(["run"], expect_pass=False)
    +        self.copy_state()
    +
    +        results = run_dbt(
    +            ["run", "--select", "result:skipped+", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 2
    +        assert results[0].node.name == "table_model"
    +        assert results[1].node.name == "table_model_downstream"
    +
    +
    +class TestTestRunResultsState(BaseRunResultsState):
    +    def test_test_run_results_state(self, project):
    +        self.run_and_save_state()
    +        # run passed nodes
    +        results = run_dbt(
    +            ["test", "--select", "result:pass", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 2
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
    +
    +        # run passed nodes with + operator
    +        results = run_dbt(
    +            ["test", "--select", "result:pass+", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 2
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
    +
    +        self.update_view_model_failing_tests()
    +        self.rebuild_run_dbt(expect_pass=False)
    +
    +        # test with failure selector
    +        results = run_dbt(
    +            ["test", "--select", "result:fail", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "unique_view_model_id"
    +
    +        # test with failure selector and + operator
    +        results = run_dbt(
    +            ["test", "--select", "result:fail+", "--state", "./state"], expect_pass=False
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "unique_view_model_id"
    +
    +        self.update_unique_test_severity_warn()
    +        # rebuild - expect_pass = True because we changed the error to a warning this time around
    +        self.rebuild_run_dbt(expect_pass=True)
    +
    +        # test with warn selector
    +        results = run_dbt(
    +            ["test", "--select", "result:warn", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "unique_view_model_id"
    +
    +        # test with warn selector and + operator
    +        results = run_dbt(
    +            ["test", "--select", "result:warn+", "--state", "./state"], expect_pass=True
    +        )
    +        assert len(results) == 1
    +        assert results[0].node.name == "unique_view_model_id"
    +
    +
    +class TestConcurrentSelectionRunResultsState(BaseRunResultsState):
    +    def test_concurrent_selection_run_run_results_state(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(
    +            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"]
    +        )
    +        assert len(results) == 0
    +
    +        self.update_view_model_bad_sql()
    +        self.clear_state()
    +        run_dbt(["run"], expect_pass=False)
    +        self.copy_state()
    +
    +        # add a new failing dbt model
    +        bad_sql = "select * from forced_error"
    +        write_file(bad_sql, "models", "table_model_modified_example.sql")
    +
    +        results = run_dbt(
    +            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"],
    +            expect_pass=False,
    +        )
    +        assert len(results) == 3
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"view_model", "table_model_modified_example", "table_model"}
    +
    +
    +class TestConcurrentSelectionTestRunResultsState(BaseRunResultsState):
    +    def test_concurrent_selection_test_run_results_state(self, project):
    +        self.run_and_save_state()
    +        # create failure test case for result:fail selector
    +        self.update_view_model_failing_tests(with_nulls=True)
    +
    +        # run dbt build again to trigger test errors
    +        self.rebuild_run_dbt(expect_pass=False)
    +
+        # get the failures from the previous build, excluding the not_null test
    +        results = run_dbt(
    +            [
    +                "test",
    +                "--select",
    +                "result:fail",
    +                "--exclude",
    +                "not_null_view_model_id",
    +                "--state",
    +                "./state",
    +            ],
    +            expect_pass=False,
    +        )
    +        assert len(results) == 1
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {"unique_view_model_id"}
    +
    +
    +class TestConcurrentSelectionBuildRunResultsState(BaseRunResultsState):
    +    def test_concurrent_selectors_build_run_results_state(self, project):
    +        self.run_and_save_state()
    +        results = run_dbt(
    +            ["build", "--select", "state:modified+", "result:error+", "--state", "./state"]
    +        )
    +        assert len(results) == 0
    +
    +        self.update_view_model_bad_sql()
    +        self.rebuild_run_dbt(expect_pass=False)
    +
    +        # add a new failing dbt model
    +        bad_sql = "select * from forced_error"
    +        write_file(bad_sql, "models", "table_model_modified_example.sql")
    +
    +        results = run_dbt(
    +            ["build", "--select", "state:modified+", "result:error+", "--state", "./state"],
    +            expect_pass=False,
    +        )
    +        assert len(results) == 5
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {
    +            "table_model_modified_example",
    +            "view_model",
    +            "table_model",
    +            "not_null_view_model_id",
    +            "unique_view_model_id",
    +        }
    +
    +        self.update_view_model_failing_tests()
    +
    +        # create error model case for result:error selector
    +        more_bad_sql = "select 1 as id from not_exists"
    +        write_file(more_bad_sql, "models", "error_model.sql")
    +
    +        # create something downstream from the error model to rerun
    +        downstream_model_sql = "select * from {{ ref('error_model') }} )"
    +        write_file(downstream_model_sql, "models", "downstream_of_error_model.sql")
    +
    +        # regenerate build state
    +        self.rebuild_run_dbt(expect_pass=False)
    +
    +        # modify model again to trigger the state:modified selector
    +        bad_again_sql = "select * from forced_anothererror"
    +        write_file(bad_again_sql, "models", "table_model_modified_example.sql")
    +
    +        results = run_dbt(
    +            [
    +                "build",
    +                "--select",
    +                "state:modified+",
    +                "result:error+",
    +                "result:fail+",
    +                "--state",
    +                "./state",
    +            ],
    +            expect_pass=False,
    +        )
    +        assert len(results) == 5
    +        nodes = set([elem.node.name for elem in results])
    +        assert nodes == {
    +            "error_model",
    +            "downstream_of_error_model",
    +            "table_model_modified_example",
    +            "table_model",
    +            "unique_view_model_id",
    +        }
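Unlike the state:modified tests, the result:<status> selectors exercised above compare against run_results.json, which is why BaseRunResultsState.copy_state snapshots both artifacts. A minimal sketch of the equivalent manual workflow (assuming the dbt CLI is on PATH):

import os
import shutil
import subprocess

subprocess.run(["dbt", "build"], check=False)  # may fail; we want its statuses

os.makedirs("state", exist_ok=True)
shutil.copyfile("target/manifest.json", "state/manifest.json")
shutil.copyfile("target/run_results.json", "state/run_results.json")

# Rebuild only the nodes that errored last time, plus everything downstream.
subprocess.run(["dbt", "build", "--select", "result:error+", "--state", "state"])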
    diff --git a/tests/functional/dependencies/test_local_dependency.py b/tests/functional/dependencies/test_local_dependency.py
    index 3e0bc5efdb7..13605028519 100644
    --- a/tests/functional/dependencies/test_local_dependency.py
    +++ b/tests/functional/dependencies/test_local_dependency.py
    @@ -184,7 +184,7 @@ def models(self):
     
         def test_missing_dependency(self, project):
             # dbt should raise a runtime exception
    -        with pytest.raises(dbt.exceptions.RuntimeException):
    +        with pytest.raises(dbt.exceptions.DbtRuntimeError):
                 run_dbt(["compile"])
     
     
    @@ -335,12 +335,12 @@ def prepare_dependencies(self, project):
             )
     
         def test_local_dependency_same_name(self, prepare_dependencies, project):
    -        with pytest.raises(dbt.exceptions.DependencyException):
    +        with pytest.raises(dbt.exceptions.DependencyError):
                 run_dbt(["deps"], expect_pass=False)
     
         def test_local_dependency_same_name_sneaky(self, prepare_dependencies, project):
             shutil.copytree("duplicate_dependency", "./dbt_packages/duplicate_dependency")
    -        with pytest.raises(dbt.exceptions.CompilationException):
    +        with pytest.raises(dbt.exceptions.CompilationError):
                 run_dbt(["compile"])
     
             # needed to avoid compilation errors from duplicate package names in test autocleanup
    diff --git a/tests/functional/deprecations/test_deprecations.py b/tests/functional/deprecations/test_deprecations.py
    index fc76289b2ee..a70b3687c69 100644
    --- a/tests/functional/deprecations/test_deprecations.py
    +++ b/tests/functional/deprecations/test_deprecations.py
    @@ -63,7 +63,7 @@ def test_data_path(self, project):
         def test_data_path_fail(self, project):
             deprecations.reset_deprecations()
             assert deprecations.active_deprecations == set()
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 run_dbt(["--warn-error", "debug"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             expected_msg = "The `data-paths` config has been renamed"
    @@ -107,7 +107,7 @@ def test_package_path(self, project):
         def test_package_path_not_set(self, project):
             deprecations.reset_deprecations()
             assert deprecations.active_deprecations == set()
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 run_dbt(["--warn-error", "clean"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             expected_msg = "path has changed from `dbt_modules` to `dbt_packages`."
    @@ -134,7 +134,7 @@ def test_package_redirect(self, project):
         def test_package_redirect_fail(self, project):
             deprecations.reset_deprecations()
             assert deprecations.active_deprecations == set()
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 run_dbt(["--warn-error", "deps"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             expected_msg = "The `fishtown-analytics/dbt_utils` package is deprecated in favor of `dbt-labs/dbt_utils`"
    @@ -159,7 +159,7 @@ def test_metric_handle_rename(self, project):
         def test_metric_handle_rename_fail(self, project):
             deprecations.reset_deprecations()
             assert deprecations.active_deprecations == set()
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 # turn off partial parsing to ensure that the metric is re-parsed
                 run_dbt(["--warn-error", "--no-partial-parse", "parse"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
    @@ -182,7 +182,7 @@ def test_exposure_name(self, project):
         def test_exposure_name_fail(self, project):
             deprecations.reset_deprecations()
             assert deprecations.active_deprecations == set()
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 run_dbt(["--warn-error", "--no-partial-parse", "parse"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             expected_msg = "Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores."
    diff --git a/tests/functional/docs/test_duplicate_docs_block.py b/tests/functional/docs/test_duplicate_docs_block.py
    new file mode 100644
    index 00000000000..2ff9459e4b3
    --- /dev/null
    +++ b/tests/functional/docs/test_duplicate_docs_block.py
    @@ -0,0 +1,35 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +import dbt.exceptions
    +
    +
    +duplicate_doc_blocks_model_sql = "select 1 as id, 'joe' as first_name"
    +
    +duplicate_doc_blocks_docs_md = """{% docs my_model_doc %}
    +    a doc string
    +{% enddocs %}
    +
    +{% docs my_model_doc %}
    +    duplicate doc string
    +{% enddocs %}"""
    +
    +duplicate_doc_blocks_schema_yml = """version: 2
    +
    +models:
    +  - name: model
    +    description: "{{ doc('my_model_doc') }}"
    +"""
    +
    +
    +class TestDuplicateDocsBlock:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model.sql": duplicate_doc_blocks_model_sql,
    +            "schema.yml": duplicate_doc_blocks_schema_yml,
    +        }
    +
    +    def test_duplicate_doc_ref(self, project):
    +        with pytest.raises(dbt.exceptions.CompilationError):
    +            run_dbt(expect_pass=False)
    diff --git a/tests/functional/docs/test_good_docs_blocks.py b/tests/functional/docs/test_good_docs_blocks.py
    new file mode 100644
    index 00000000000..9fc9a7f0bb5
    --- /dev/null
    +++ b/tests/functional/docs/test_good_docs_blocks.py
    @@ -0,0 +1,171 @@
    +import json
    +import os
    +from pathlib import Path
    +import pytest
    +
    +from dbt.tests.util import run_dbt, update_config_file, write_file
    +
    +
    +good_docs_blocks_model_sql = "select 1 as id, 'joe' as first_name"
    +
    +good_docs_blocks_docs_md = """{% docs my_model_doc %}
    +My model is just a copy of the seed
    +{% enddocs %}
    +
    +{% docs my_model_doc__id %}
    +The user ID number
    +{% enddocs %}
    +
    +The following doc is never used, which should be fine.
    +{% docs my_model_doc__first_name %}
    +The user's first name (should not be shown!)
    +{% enddocs %}
    +
    +This doc is referenced by its full name
    +{% docs my_model_doc__last_name %}
    +The user's last name
    +{% enddocs %}
    +"""
    +
    +good_docs_blocks_alt_docs_md = """{% docs my_model_doc %}
    +Alt text about the model
    +{% enddocs %}
    +
    +{% docs my_model_doc__id %}
    +The user ID number with alternative text
    +{% enddocs %}
    +
    +The following doc is never used, which should be fine.
    +{% docs my_model_doc__first_name %}
    +The user's first name - don't show this text!
    +{% enddocs %}
    +
    +This doc is referenced by its full name
    +{% docs my_model_doc__last_name %}
    +The user's last name in this other file
    +{% enddocs %}
    +"""
    +
    +good_docs_blocks_schema_yml = """version: 2
    +
    +models:
    +  - name: model
    +    description: "{{ doc('my_model_doc') }}"
    +    columns:
    +      - name: id
    +        description: "{{ doc('my_model_doc__id') }}"
    +      - name: first_name
    +        description: The user's first name
    +      - name: last_name
    +        description: "{{ doc('test', 'my_model_doc__last_name') }}"
    +"""
    +
    +
    +class TestGoodDocsBlocks:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model.sql": good_docs_blocks_model_sql,
    +            "schema.yml": good_docs_blocks_schema_yml,
    +            "docs.md": good_docs_blocks_docs_md,
    +        }
    +
    +    def test_valid_doc_ref(self, project):
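    +        # run the project, then check the manifest for the rendered doc blocks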
    +        result = run_dbt()
    +        assert len(result.results) == 1
    +
    +        assert os.path.exists("./target/manifest.json")
    +
    +        with open("./target/manifest.json") as fp:
    +            manifest = json.load(fp)
    +
    +        model_data = manifest["nodes"]["model.test.model"]
    +
    +        assert model_data["description"] == "My model is just a copy of the seed"
    +
    +        assert {
    +            "name": "id",
    +            "description": "The user ID number",
    +            "data_type": None,
    +            "meta": {},
    +            "quote": None,
    +            "tags": [],
    +        } == model_data["columns"]["id"]
    +
    +        assert {
    +            "name": "first_name",
    +            "description": "The user's first name",
    +            "data_type": None,
    +            "meta": {},
    +            "quote": None,
    +            "tags": [],
    +        } == model_data["columns"]["first_name"]
    +
    +        assert {
    +            "name": "last_name",
    +            "description": "The user's last name",
    +            "data_type": None,
    +            "meta": {},
    +            "quote": None,
    +            "tags": [],
    +        } == model_data["columns"]["last_name"]
    +
    +        assert len(model_data["columns"]) == 3
    +
    +
    +class TestGoodDocsBlocksAltPath:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model.sql": good_docs_blocks_model_sql, "schema.yml": good_docs_blocks_schema_yml}
    +
    +    def test_alternative_docs_path(self, project):
    +        # write the docs file into an alternative directory and point docs-paths at it
    +        docs_path = Path(project.project_root, "alt-docs")
    +        docs_path.mkdir()
    +        write_file(good_docs_blocks_alt_docs_md, project.project_root, "alt-docs", "docs.md")
    +
    +        update_config_file(
    +            {"docs-paths": [str(docs_path)]}, project.project_root, "dbt_project.yml"
    +        )
    +
    +        result = run_dbt()
    +
    +        assert len(result.results) == 1
    +
    +        assert os.path.exists("./target/manifest.json")
    +
    +        with open("./target/manifest.json") as fp:
    +            manifest = json.load(fp)
    +
    +        model_data = manifest["nodes"]["model.test.model"]
    +
    +        assert model_data["description"] == "Alt text about the model"
    +
    +        assert {
    +            "name": "id",
    +            "description": "The user ID number with alternative text",
    +            "data_type": None,
    +            "meta": {},
    +            "quote": None,
    +            "tags": [],
    +        } == model_data["columns"]["id"]
    +
    +        assert {
    +            "name": "first_name",
    +            "description": "The user's first name",
    +            "data_type": None,
    +            "meta": {},
    +            "quote": None,
    +            "tags": [],
    +        } == model_data["columns"]["first_name"]
    +
    +        assert {
    +            "name": "last_name",
    +            "description": "The user's last name in this other file",
    +            "data_type": None,
    +            "meta": {},
    +            "quote": None,
    +            "tags": [],
    +        } == model_data["columns"]["last_name"]
    +
    +        assert len(model_data["columns"]) == 3
    diff --git a/tests/functional/docs/test_invalid_doc_ref.py b/tests/functional/docs/test_invalid_doc_ref.py
    new file mode 100644
    index 00000000000..7c486938124
    --- /dev/null
    +++ b/tests/functional/docs/test_invalid_doc_ref.py
    @@ -0,0 +1,47 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +import dbt.exceptions
    +
    +
    +invalid_doc_ref_model_sql = "select 1 as id, 'joe' as first_name"
    +
    +invalid_doc_ref_docs_md = """{% docs my_model_doc %}
    +My model is just a copy of the seed
    +{% enddocs %}
    +
    +{% docs my_model_doc__id %}
    +The user ID number
    +{% enddocs %}
    +
    +The following doc is never used, which should be fine.
    +{% docs my_model_doc__first_name %}
    +The user's first name
    +{% enddocs %}"""
    +
    +invalid_doc_ref_schema_yml = """version: 2
    +
    +models:
    +  - name: model
    +    description: "{{ doc('my_model_doc') }}"
    +    columns:
    +      - name: id
    +        description: "{{ doc('my_model_doc__id') }}"
    +      - name: first_name
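    +      # invalid reference: this doc name cannot be resolved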
    +        description: "{{ doc('foo.bar.my_model_doc__id') }}"
    +"""
    +
    +
    +class TestInvalidDocRef:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model.sql": invalid_doc_ref_model_sql,
    +            "docs.md": invalid_doc_ref_docs_md,
    +            "schema.yml": invalid_doc_ref_schema_yml,
    +        }
    +
    +    def test_invalid_doc_ref(self, project):
    +        # The run should fail since we could not find the docs reference.
    +        with pytest.raises(dbt.exceptions.CompilationError):
    +            run_dbt(expect_pass=False)
    diff --git a/tests/functional/docs/test_missing_docs_blocks.py b/tests/functional/docs/test_missing_docs_blocks.py
    new file mode 100644
    index 00000000000..3b6f4e540b9
    --- /dev/null
    +++ b/tests/functional/docs/test_missing_docs_blocks.py
    @@ -0,0 +1,43 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +import dbt.exceptions
    +
    +
    +missing_docs_blocks_model_sql = "select 1 as id, 'joe' as first_name"
    +
    +missing_docs_blocks_docs_md = """{% docs my_model_doc %}
    +My model is just a copy of the seed
    +{% enddocs %}
    +
    +{% docs my_model_doc__id %}
    +The user ID number
    +{% enddocs %}"""
    +
    +missing_docs_blocks_schema_yml = """version: 2
    +
    +models:
    +  - name: model
    +    description: "{{ doc('my_model_doc') }}"
    +    columns:
    +      - name: id
    +        description: "{{ doc('my_model_doc__id') }}"
    +      - name: first_name
    +      # invalid reference
    +        description: "{{ doc('my_model_doc__first_name') }}"
    +"""
    +
    +
    +class TestMissingDocsBlocks:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model.sql": missing_docs_blocks_model_sql,
    +            "schema.yml": missing_docs_blocks_schema_yml,
    +            "docs.md": missing_docs_blocks_docs_md,
    +        }
    +
    +    def test_missing_doc_ref(self, project):
    +        # The run should fail since we could not find the docs reference.
    +        with pytest.raises(dbt.exceptions.CompilationError):
    +            run_dbt()
    diff --git a/tests/functional/duplicates/test_duplicate_analysis.py b/tests/functional/duplicates/test_duplicate_analysis.py
    index e9050860ad9..44dc4c6f167 100644
    --- a/tests/functional/duplicates/test_duplicate_analysis.py
    +++ b/tests/functional/duplicates/test_duplicate_analysis.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.tests.util import run_dbt
     
     
    @@ -27,7 +27,7 @@ def analyses(self):
     
         def test_duplicate_model_enabled(self, project):
             message = "dbt found two analyses with the name"
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             assert message in exc_str
    diff --git a/tests/functional/duplicates/test_duplicate_exposure.py b/tests/functional/duplicates/test_duplicate_exposure.py
    index 6035da7c110..140db21cd07 100644
    --- a/tests/functional/duplicates/test_duplicate_exposure.py
    +++ b/tests/functional/duplicates/test_duplicate_exposure.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.tests.util import run_dbt
     
     
    @@ -26,6 +26,6 @@ def models(self):
     
         def test_duplicate_exposure(self, project):
             message = "dbt found two exposures with the name"
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
             assert message in str(exc.value)
    diff --git a/tests/functional/duplicates/test_duplicate_macro.py b/tests/functional/duplicates/test_duplicate_macro.py
    index 1fc7282808f..35b843f5891 100644
    --- a/tests/functional/duplicates/test_duplicate_macro.py
    +++ b/tests/functional/duplicates/test_duplicate_macro.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.tests.util import run_dbt
     
     
    @@ -43,7 +43,7 @@ def macros(self):
     
         def test_duplicate_macros(self, project):
             message = 'dbt found two macros named "some_macro" in the project'
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["parse"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             assert message in exc_str
    @@ -64,7 +64,7 @@ def macros(self):
     
         def test_duplicate_macros(self, project):
             message = 'dbt found two macros named "some_macro" in the project'
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             assert message in exc_str
    diff --git a/tests/functional/duplicates/test_duplicate_metric.py b/tests/functional/duplicates/test_duplicate_metric.py
    index e40295278b9..f8beca39c24 100644
    --- a/tests/functional/duplicates/test_duplicate_metric.py
    +++ b/tests/functional/duplicates/test_duplicate_metric.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.tests.util import run_dbt
     
     
    @@ -46,6 +46,6 @@ def models(self):
     
         def test_duplicate_metric(self, project):
             message = "dbt found two metrics with the name"
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
             assert message in str(exc.value)
    diff --git a/tests/functional/duplicates/test_duplicate_model.py b/tests/functional/duplicates/test_duplicate_model.py
    index fbcd1b79671..7a53fd6de63 100644
    --- a/tests/functional/duplicates/test_duplicate_model.py
    +++ b/tests/functional/duplicates/test_duplicate_model.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException, DuplicateResourceName
    +from dbt.exceptions import CompilationError, DuplicateResourceNameError
     from dbt.tests.fixtures.project import write_project_files
     from dbt.tests.util import run_dbt, get_manifest
     
    @@ -54,7 +54,7 @@ def models(self):
     
         def test_duplicate_model_enabled(self, project):
             message = "dbt found two models with the name"
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
             exc_str = " ".join(str(exc.value).split())  # flatten all whitespace
             assert message in exc_str
    @@ -108,7 +108,7 @@ def packages(self):
         def test_duplicate_model_enabled_across_packages(self, project):
             run_dbt(["deps"])
             message = "dbt found two models with the name"
    -        with pytest.raises(DuplicateResourceName) as exc:
    +        with pytest.raises(DuplicateResourceNameError) as exc:
                 run_dbt(["run"])
             assert message in str(exc.value)
     
    diff --git a/tests/functional/duplicates/test_duplicate_source.py b/tests/functional/duplicates/test_duplicate_source.py
    index 181aaf5d18e..1100345aabc 100644
    --- a/tests/functional/duplicates/test_duplicate_source.py
    +++ b/tests/functional/duplicates/test_duplicate_source.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.tests.util import run_dbt
     
     
    @@ -22,6 +22,6 @@ def models(self):
     
         def test_duplicate_source_enabled(self, project):
             message = "dbt found two sources with the name"
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
             assert message in str(exc.value)
    diff --git a/tests/functional/exit_codes/fixtures.py b/tests/functional/exit_codes/fixtures.py
    index 23a0bef3897..296e1a3f6c0 100644
    --- a/tests/functional/exit_codes/fixtures.py
    +++ b/tests/functional/exit_codes/fixtures.py
    @@ -74,5 +74,5 @@ def models(self):
                 "bad.sql": bad_sql,
                 "dupe.sql": dupe_sql,
                 "good.sql": good_sql,
    -            "schema.yml": schema_yml
    +            "schema.yml": schema_yml,
             }
    diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py
    index 54b5cb6865e..44672beecae 100644
    --- a/tests/functional/exit_codes/test_exit_codes.py
    +++ b/tests/functional/exit_codes/test_exit_codes.py
    @@ -1,17 +1,13 @@
     import pytest
     
     import dbt.exceptions
    -from dbt.tests.util import (
    -    check_table_does_exist,
    -    check_table_does_not_exist,
    -    run_dbt
    -)
    +from dbt.tests.util import check_table_does_exist, check_table_does_not_exist, run_dbt
     from tests.functional.exit_codes.fixtures import (
         BaseConfigProject,
         snapshots_bad_sql,
         snapshots_good_sql,
         data_seed_bad_csv,
    -    data_seed_good_csv
    +    data_seed_good_csv,
     )
     
     
    @@ -21,38 +17,38 @@ def snapshots(self):
             return {"g.sql": snapshots_good_sql}
     
         def test_exit_code_run_succeed(self, project):
    -        results = run_dbt(['run', '--model', 'good'])
    +        results = run_dbt(["run", "--model", "good"])
             assert len(results) == 1
    -        check_table_does_exist(project.adapter, 'good')
    +        check_table_does_exist(project.adapter, "good")
     
         def test_exit_code_run_fail(self, project):
    -        results = run_dbt(['run', '--model', 'bad'], expect_pass=False)
    +        results = run_dbt(["run", "--model", "bad"], expect_pass=False)
             assert len(results) == 1
    -        check_table_does_not_exist(project.adapter, 'bad')
    +        check_table_does_not_exist(project.adapter, "bad")
     
         def test_schema_test_pass(self, project):
    -        results = run_dbt(['run', '--model', 'good'])
    +        results = run_dbt(["run", "--model", "good"])
             assert len(results) == 1
     
    -        results = run_dbt(['test', '--model', 'good'])
    +        results = run_dbt(["test", "--model", "good"])
             assert len(results) == 1
     
         def test_schema_test_fail(self, project):
    -        results = run_dbt(['run', '--model', 'dupe'])
    +        results = run_dbt(["run", "--model", "dupe"])
             assert len(results) == 1
     
    -        results = run_dbt(['test', '--model', 'dupe'], expect_pass=False)
    +        results = run_dbt(["test", "--model", "dupe"], expect_pass=False)
             assert len(results) == 1
     
         def test_compile(self, project):
    -        results = run_dbt(['compile'])
    +        results = run_dbt(["compile"])
             assert len(results) == 7
     
         def test_snapshot_pass(self, project):
             run_dbt(["run", "--model", "good"])
    -        results = run_dbt(['snapshot'])
    +        results = run_dbt(["snapshot"])
             assert len(results) == 1
    -        check_table_does_exist(project.adapter, 'good_snapshot')
    +        check_table_does_exist(project.adapter, "good_snapshot")
     
     
     class TestExitCodesSnapshotFail(BaseConfigProject):
    @@ -61,12 +57,12 @@ def snapshots(self):
             return {"b.sql": snapshots_bad_sql}
     
         def test_snapshot_fail(self, project):
    -        results = run_dbt(['run', '--model', 'good'])
    +        results = run_dbt(["run", "--model", "good"])
             assert len(results) == 1
     
    -        results = run_dbt(['snapshot'], expect_pass=False)
    +        results = run_dbt(["snapshot"], expect_pass=False)
             assert len(results) == 1
    -        check_table_does_not_exist(project.adapter, 'good_snapshot')
    +        check_table_does_not_exist(project.adapter, "good_snapshot")
     
     
     class TestExitCodesDeps:
    @@ -75,14 +71,14 @@ def packages(self):
             return {
                 "packages": [
                     {
    -                    'git': 'https://github.com/dbt-labs/dbt-integration-project',
    -                    'revision': 'dbt/1.0.0',
    +                    "git": "https://github.com/dbt-labs/dbt-integration-project",
    +                    "revision": "dbt/1.0.0",
                     }
                 ]
             }
     
         def test_deps(self, project):
    -        results = run_dbt(['deps'])
    +        results = run_dbt(["deps"])
             assert results is None
     
     
    @@ -92,15 +88,15 @@ def packages(self):
             return {
                 "packages": [
                     {
    -                    'git': 'https://github.com/dbt-labs/dbt-integration-project',
    -                    'revision': 'bad-branch',
    +                    "git": "https://github.com/dbt-labs/dbt-integration-project",
    +                    "revision": "bad-branch",
                     },
                 ]
             }
     
         def test_deps_fail(self, project):
             with pytest.raises(dbt.exceptions.GitCheckoutError) as exc:
    -            run_dbt(['deps'])
    +            run_dbt(["deps"])
             expected_msg = "Error checking out spec='bad-branch'"
             assert expected_msg in str(exc.value)
     
    @@ -111,7 +107,7 @@ def seeds(self):
             return {"good.csv": data_seed_good_csv}
     
         def test_seed(self, project):
    -        results = run_dbt(['seed'])
    +        results = run_dbt(["seed"])
             assert len(results) == 1
     
     
    @@ -121,4 +117,4 @@ def seeds(self):
             return {"bad.csv": data_seed_bad_csv}
     
         def test_seed(self, project):
    -        run_dbt(['seed'], expect_pass=False)
    +        run_dbt(["seed"], expect_pass=False)
    diff --git a/tests/functional/exposures/fixtures.py b/tests/functional/exposures/fixtures.py
    index 1d573b1a7b6..f02c5723f72 100644
    --- a/tests/functional/exposures/fixtures.py
    +++ b/tests/functional/exposures/fixtures.py
    @@ -1,4 +1,3 @@
    -
     models_sql = """
     select 1 as id
     """
    diff --git a/tests/functional/exposures/test_exposure_configs.py b/tests/functional/exposures/test_exposure_configs.py
    index a7018204952..199a6368a4a 100644
    --- a/tests/functional/exposures/test_exposure_configs.py
    +++ b/tests/functional/exposures/test_exposure_configs.py
    @@ -12,7 +12,7 @@
         enabled_yaml_level_exposure_yml,
         invalid_config_exposure_yml,
         source_schema_yml,
    -    metrics_schema_yml
    +    metrics_schema_yml,
     )
     
     
    diff --git a/tests/functional/exposures/test_exposures.py b/tests/functional/exposures/test_exposures.py
    index 777a8e161c4..97849fa0835 100644
    --- a/tests/functional/exposures/test_exposures.py
    +++ b/tests/functional/exposures/test_exposures.py
    @@ -6,7 +6,7 @@
         second_model_sql,
         simple_exposure_yml,
         source_schema_yml,
    -    metrics_schema_yml
    +    metrics_schema_yml,
     )
     
     
    @@ -37,8 +37,8 @@ def test_depends_on(self, project):
             manifest = get_manifest(project.project_root)
             exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes
             expected_exposure_depends_on = [
    -            'source.test.test_source.test_table',
    -            'model.test.model',
    -            'metric.test.metric'
    +            "source.test.test_source.test_table",
    +            "model.test.model",
    +            "metric.test.metric",
             ]
             assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on)
    diff --git a/tests/functional/external_reference/test_external_reference.py b/tests/functional/external_reference/test_external_reference.py
    new file mode 100644
    index 00000000000..8b5294155d8
    --- /dev/null
    +++ b/tests/functional/external_reference/test_external_reference.py
    @@ -0,0 +1,59 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +
    +
    +external_model_sql = """
    +{{
    +  config(
    +    materialized = "view"
    +  )
    +}}
    +
    +select * from "{{ this.schema + 'z' }}"."external"
    +"""
    +
    +model_sql = """
    +select 1 as id
    +"""
    +
    +
    +class TestExternalReference:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model.sql": external_model_sql}
    +
    +    def test_external_reference(self, project, unique_schema):
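    +        # create a schema and table that dbt does not manage for the model to select from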
    +        external_schema = unique_schema + "z"
    +        project.run_sql(f'create schema "{external_schema}"')
    +        project.run_sql(f'create table "{external_schema}"."external" (id integer)')
    +        project.run_sql(f'insert into "{external_schema}"."external" values (1), (2)')
    +
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
    +        # running it again should succeed
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
    +
    +# The opposite of the test above -- check that external relations that
    +# depend on a dbt model do not create issues with caching
    +class TestExternalDependency:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"model.sql": model_sql}
    +
    +    def test_external_reference(self, project, unique_schema):
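    +        # materialize the model first, then create an external view on top of it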
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
    +        external_schema = unique_schema + "z"
    +        project.run_sql(f'create schema "{external_schema}"')
    +        project.run_sql(
    +            f'create view "{external_schema}"."external" as (select * from {unique_schema}.model)'
    +        )
    +
    +        # running it again should succeed
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    diff --git a/tests/functional/fail_fast/test_fail_fast_run.py b/tests/functional/fail_fast/test_fail_fast_run.py
    index 3ea3c4bc0f0..5c0c8cf849d 100644
    --- a/tests/functional/fail_fast/test_fail_fast_run.py
    +++ b/tests/functional/fail_fast/test_fail_fast_run.py
    @@ -2,7 +2,7 @@
     
     from dbt.tests.util import run_dbt
     from tests.functional.fail_fast.fixtures import models, project_files  # noqa: F401
    -from dbt.exceptions import FailFastException
    +from dbt.exceptions import FailFastError
     
     
     def check_audit_table(project, count=1):
    @@ -43,7 +43,7 @@ def test_fail_fast_run(
             self,
             project,
         ):
    -        with pytest.raises(FailFastException):
    +        with pytest.raises(FailFastError):
                 run_dbt(["run", "--threads", "1", "--fail-fast"])
                 check_audit_table(project)
     
    @@ -62,6 +62,6 @@ def test_fail_fast_run_user_config(
             self,
             project,
         ):
    -        with pytest.raises(FailFastException):
    +        with pytest.raises(FailFastError):
                 run_dbt(["run", "--threads", "1"])
                 check_audit_table(project)
    diff --git a/tests/functional/hooks/test_model_hooks.py b/tests/functional/hooks/test_model_hooks.py
    index 097fa8af0c8..99a05c9c895 100644
    --- a/tests/functional/hooks/test_model_hooks.py
    +++ b/tests/functional/hooks/test_model_hooks.py
    @@ -2,7 +2,7 @@
     
     from pathlib import Path
     
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     
     from dbt.tests.util import (
         run_dbt,
    @@ -170,6 +170,29 @@ def test_pre_and_post_run_hooks(self, project, dbt_profile_target):
             self.check_hooks("end", project, dbt_profile_target["host"])
     
     
    +class TestPrePostModelHooksUnderscores(TestPrePostModelHooks):
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "models": {
    +                "test": {
    +                    "pre_hook": [
    +                        # inside transaction (runs second)
    +                        MODEL_PRE_HOOK,
    +                        # outside transaction (runs first)
    +                        {"sql": "vacuum {{ this.schema }}.on_model_hook", "transaction": False},
    +                    ],
    +                    "post_hook": [
    +                        # outside transaction (runs second)
    +                        {"sql": "vacuum {{ this.schema }}.on_model_hook", "transaction": False},
    +                        # inside transaction (runs first)
    +                        MODEL_POST_HOOK,
    +                    ],
    +                }
    +            }
    +        }
    +
    +
     class TestHookRefs(BaseTestPrePost):
         @pytest.fixture(scope="class")
         def project_config_update(self):
    @@ -399,7 +422,7 @@ def models(self):
             return {"hooks.sql": models__hooks_error}
     
         def test_run_duplicate_hook_defs(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt()
             assert "pre_hook" in str(exc.value)
             assert "pre-hook" in str(exc.value)
    diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/functional/incremental_schema_tests/fixtures.py
    index c6eebc5e183..b80bea45e80 100644
    --- a/tests/functional/incremental_schema_tests/fixtures.py
    +++ b/tests/functional/incremental_schema_tests/fixtures.py
    @@ -1,4 +1,3 @@
    -
     #
     # Properties
     #
    diff --git a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/functional/incremental_schema_tests/test_incremental_schema.py
    index 3ee9e6477e4..8203f497331 100644
    --- a/tests/functional/incremental_schema_tests/test_incremental_schema.py
    +++ b/tests/functional/incremental_schema_tests/test_incremental_schema.py
    @@ -41,21 +41,16 @@ def models(self):
             return {
                 "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY,
                 "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE,
    -            "incremental_sync_remove_only_target.sql":
    -                _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET,
    +            "incremental_sync_remove_only_target.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET,
                 "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET,
                 "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL,
                 "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS,
    -            "incremental_append_new_columns_remove_one.sql":
    -                _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE,
    +            "incremental_append_new_columns_remove_one.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE,
                 "model_a.sql": _MODELS__A,
    -            "incremental_append_new_columns_target.sql":
    -                _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET,
    +            "incremental_append_new_columns_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET,
                 "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS,
    -            "incremental_sync_all_columns_target.sql":
    -                _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET,
    -            "incremental_append_new_columns_remove_one_target.sql":
    -                _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET,
    +            "incremental_sync_all_columns_target.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET,
    +            "incremental_append_new_columns_remove_one_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET,
             }
     
         @pytest.fixture(scope="class")
    @@ -63,26 +58,19 @@ def tests(self):
             return {
                 "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE,
                 "select_from_a.sql": _TESTS__SELECT_FROM_A,
    -            "select_from_incremental_append_new_columns_target.sql":
    -                _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET,
    -            "select_from_incremental_sync_all_columns.sql":
    -                _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS,
    -            "select_from_incremental_sync_all_columns_target.sql":
    -                _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET,
    -            "select_from_incremental_ignore_target.sql":
    -                _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET,
    -            "select_from_incremental_append_new_columns.sql":
    -                _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS,
    +            "select_from_incremental_append_new_columns_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET,
    +            "select_from_incremental_sync_all_columns.sql": _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS,
    +            "select_from_incremental_sync_all_columns_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET,
    +            "select_from_incremental_ignore_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET,
    +            "select_from_incremental_append_new_columns.sql": _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS,
             }
     
    -    def run_twice_and_assert(
    -        self, include, compare_source, compare_target, project
    -    ):
    +    def run_twice_and_assert(self, include, compare_source, compare_target, project):
     
             # dbt run (twice)
    -        run_args = ['run']
    +        run_args = ["run"]
             if include:
    -            run_args.extend(('--select', include))
    +            run_args.extend(("--select", include))
             results_one = run_dbt(run_args)
             assert len(results_one) == 3
     
    @@ -92,33 +80,33 @@ def run_twice_and_assert(
             check_relations_equal(project.adapter, [compare_source, compare_target])
     
         def run_incremental_append_new_columns(self, project):
    -        select = 'model_a incremental_append_new_columns incremental_append_new_columns_target'
    -        compare_source = 'incremental_append_new_columns'
    -        compare_target = 'incremental_append_new_columns_target'
    +        select = "model_a incremental_append_new_columns incremental_append_new_columns_target"
    +        compare_source = "incremental_append_new_columns"
    +        compare_target = "incremental_append_new_columns_target"
             self.run_twice_and_assert(select, compare_source, compare_target, project)
     
         def run_incremental_append_new_columns_remove_one(self, project):
    -        select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target'
    -        compare_source = 'incremental_append_new_columns_remove_one'
    -        compare_target = 'incremental_append_new_columns_remove_one_target'
    +        select = "model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target"
    +        compare_source = "incremental_append_new_columns_remove_one"
    +        compare_target = "incremental_append_new_columns_remove_one_target"
             self.run_twice_and_assert(select, compare_source, compare_target, project)
     
         def run_incremental_sync_all_columns(self, project):
    -        select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target'
    -        compare_source = 'incremental_sync_all_columns'
    -        compare_target = 'incremental_sync_all_columns_target'
    +        select = "model_a incremental_sync_all_columns incremental_sync_all_columns_target"
    +        compare_source = "incremental_sync_all_columns"
    +        compare_target = "incremental_sync_all_columns_target"
             self.run_twice_and_assert(select, compare_source, compare_target, project)
     
         def run_incremental_sync_remove_only(self, project):
    -        select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target'
    -        compare_source = 'incremental_sync_remove_only'
    -        compare_target = 'incremental_sync_remove_only_target'
    +        select = "model_a incremental_sync_remove_only incremental_sync_remove_only_target"
    +        compare_source = "incremental_sync_remove_only"
    +        compare_target = "incremental_sync_remove_only_target"
             self.run_twice_and_assert(select, compare_source, compare_target, project)
     
         def test_run_incremental_ignore(self, project):
    -        select = 'model_a incremental_ignore incremental_ignore_target'
    -        compare_source = 'incremental_ignore'
    -        compare_target = 'incremental_ignore_target'
    +        select = "model_a incremental_ignore incremental_ignore_target"
    +        compare_source = "incremental_ignore"
    +        compare_target = "incremental_ignore_target"
             self.run_twice_and_assert(select, compare_source, compare_target, project)
     
         def test_run_incremental_append_new_columns(self, project):
    @@ -130,7 +118,7 @@ def test_run_incremental_sync_all_columns(self, project):
             self.run_incremental_sync_remove_only(project)
     
         def test_run_incremental_fail_on_schema_change(self, project):
    -        select = 'model_a incremental_fail'
    -        run_dbt(['run', '--models', select, '--full-refresh'])
    -        results_two = run_dbt(['run', '--models', select], expect_pass=False)
    -        assert 'Compilation Error' in results_two[1].message
    +        select = "model_a incremental_fail"
    +        run_dbt(["run", "--models", select, "--full-refresh"])
    +        results_two = run_dbt(["run", "--models", select], expect_pass=False)
    +        assert "Compilation Error" in results_two[1].message
    diff --git a/tests/functional/init/test_init.py b/tests/functional/init/test_init.py
    new file mode 100644
    index 00000000000..6a79dfe9807
    --- /dev/null
    +++ b/tests/functional/init/test_init.py
    @@ -0,0 +1,688 @@
    +import click
    +import os
    +import pytest
    +from pathlib import Path
    +from unittest import mock
    +from unittest.mock import Mock, call
    +
    +from dbt.tests.util import run_dbt
    +
    +
    +class TestInitProjectWithExistingProfilesYml:
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    def test_init_task_in_project_with_existing_profiles_yml(
    +        self, mock_prompt, mock_confirm, mock_get_adapter, project
    +    ):
    +        manager = Mock()
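    +        # attach both mocks to one manager so assert_has_calls can check
    +        # ordering across prompt() and confirm()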
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +        manager.confirm.side_effect = ["y"]
    +        manager.prompt.side_effect = [
    +            1,
    +            "localhost",
    +            5432,
    +            "test_user",
    +            "test_password",
    +            "test_db",
    +            "test_schema",
    +            4,
    +        ]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +
    +        run_dbt(["init"])
    +
    +        manager.assert_has_calls(
    +            [
    +                call.confirm(
    +                    f"The profile test already exists in {os.path.join(project.profiles_dir, 'profiles.yml')}. Continue and overwrite it?"
    +                ),
    +                call.prompt(
    +                    "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number",
    +                    type=click.INT,
    +                ),
    +                call.prompt(
    +                    "host (hostname for the instance)", default=None, hide_input=False, type=None
    +                ),
    +                call.prompt("port", default=5432, hide_input=False, type=click.INT),
    +                call.prompt("user (dev username)", default=None, hide_input=False, type=None),
    +                call.prompt("pass (dev password)", default=None, hide_input=True, type=None),
    +                call.prompt(
    +                    "dbname (default database that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt(
    +                    "schema (default schema that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT),
    +            ]
    +        )
    +
    +        with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == """config:
    +  send_anonymous_usage_stats: false
    +test:
    +  outputs:
    +    dev:
    +      dbname: test_db
    +      host: localhost
    +      pass: test_password
    +      port: 5432
    +      schema: test_schema
    +      threads: 4
    +      type: postgres
    +      user: test_user
    +  target: dev
    +"""
    +            )
    +
    +
    +class TestInitProjectWithoutExistingProfilesYml:
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.prompt")
    +    @mock.patch.object(Path, "exists", autospec=True)
    +    def test_init_task_in_project_without_existing_profiles_yml(
    +        self, exists, mock_prompt, mock_get_adapter, project
    +    ):
    +        def exists_side_effect(path):
    +            # Override responses on specific files, default to 'real world' if not overridden
    +            return {"profiles.yml": False}.get(path.name, os.path.exists(path))
    +
    +        exists.side_effect = exists_side_effect
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.prompt.side_effect = [
    +            1,
    +            "localhost",
    +            5432,
    +            "test_user",
    +            "test_password",
    +            "test_db",
    +            "test_schema",
    +            4,
    +        ]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +
    +        run_dbt(["init"])
    +
    +        manager.assert_has_calls(
    +            [
    +                call.prompt(
    +                    "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number",
    +                    type=click.INT,
    +                ),
    +                call.prompt(
    +                    "host (hostname for the instance)", default=None, hide_input=False, type=None
    +                ),
    +                call.prompt("port", default=5432, hide_input=False, type=click.INT),
    +                call.prompt("user (dev username)", default=None, hide_input=False, type=None),
    +                call.prompt("pass (dev password)", default=None, hide_input=True, type=None),
    +                call.prompt(
    +                    "dbname (default database that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt(
    +                    "schema (default schema that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT),
    +            ]
    +        )
    +
    +        with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == """test:
    +  outputs:
    +    dev:
    +      dbname: test_db
    +      host: localhost
    +      pass: test_password
    +      port: 5432
    +      schema: test_schema
    +      threads: 4
    +      type: postgres
    +      user: test_user
    +  target: dev
    +"""
    +            )
    +
    +
    +class TestInitProjectWithoutExistingProfilesYmlOrTemplate:
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    @mock.patch.object(Path, "exists", autospec=True)
    +    def test_init_task_in_project_without_existing_profiles_yml_or_profile_template(
    +        self, exists, mock_prompt, mock_confirm, mock_get_adapter, project
    +    ):
    +        def exists_side_effect(path):
    +            # Override responses on specific files, default to 'real world' if not overridden
    +            return {
    +                "profiles.yml": False,
    +                "profile_template.yml": False,
    +            }.get(path.name, os.path.exists(path))
    +
    +        exists.side_effect = exists_side_effect
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +        manager.prompt.side_effect = [
    +            1,
    +        ]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +        run_dbt(["init"])
    +        manager.assert_has_calls(
    +            [
    +                call.prompt(
    +                    "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number",
    +                    type=click.INT,
    +                ),
    +            ]
    +        )
    +
    +        with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == """test:
    +  outputs:
    +
    +    dev:
    +      type: postgres
    +      threads: [1 or more]
    +      host: [host]
    +      port: [port]
    +      user: [dev_username]
    +      pass: [dev_password]
    +      dbname: [dbname]
    +      schema: [dev_schema]
    +
    +    prod:
    +      type: postgres
    +      threads: [1 or more]
    +      host: [host]
    +      port: [port]
    +      user: [prod_username]
    +      pass: [prod_password]
    +      dbname: [dbname]
    +      schema: [prod_schema]
    +
    +  target: dev
    +"""
    +            )
    +
    +
    +class TestInitProjectWithProfileTemplateWithoutExistingProfilesYml:
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    @mock.patch.object(Path, "exists", autospec=True)
    +    def test_init_task_in_project_with_profile_template_without_existing_profiles_yml(
    +        self, exists, mock_prompt, mock_confirm, mock_get_adapter, project
    +    ):
    +        def exists_side_effect(path):
    +            # Override responses on specific files, default to 'real world' if not overridden
    +            return {
    +                "profiles.yml": False,
    +            }.get(path.name, os.path.exists(path))
    +
    +        exists.side_effect = exists_side_effect
    +
    +        with open("profile_template.yml", "w") as f:
    +            f.write(
    +                """fixed:
    +  type: postgres
    +  threads: 4
    +  host: localhost
    +  dbname: my_db
    +  schema: my_schema
    +  target: my_target
    +prompts:
    +  target:
    +    hint: 'The target name'
    +    type: string
    +  port:
    +    hint: 'The port (for integer test purposes)'
    +    type: int
    +    default: 5432
    +  user:
    +    hint: 'Your username'
    +  pass:
    +    hint: 'Your password'
    +    hide_input: true"""
    +            )
    +
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +        manager.prompt.side_effect = ["my_target", 5432, "test_username", "test_password"]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +        run_dbt(["init"])
    +        manager.assert_has_calls(
    +            [
    +                call.prompt(
    +                    "target (The target name)", default=None, hide_input=False, type=click.STRING
    +                ),
    +                call.prompt(
    +                    "port (The port (for integer test purposes))",
    +                    default=5432,
    +                    hide_input=False,
    +                    type=click.INT,
    +                ),
    +                call.prompt("user (Your username)", default=None, hide_input=False, type=None),
    +                call.prompt("pass (Your password)", default=None, hide_input=True, type=None),
    +            ]
    +        )
    +
    +        with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == """test:
    +  outputs:
    +    my_target:
    +      dbname: my_db
    +      host: localhost
    +      pass: test_password
    +      port: 5432
    +      schema: my_schema
    +      threads: 4
    +      type: postgres
    +      user: test_username
    +  target: my_target
    +"""
    +            )
    +
    +
    +class TestInitInvalidProfileTemplate:
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    def test_init_task_in_project_with_invalid_profile_template(
    +        self, mock_prompt, mock_confirm, mock_get_adapter, project
    +    ):
    +        """Test that when an invalid profile_template.yml is provided in the project,
    +        init command falls back to the target's profile_template.yml"""
    +        with open(os.path.join(project.project_root, "profile_template.yml"), "w") as f:
    +            f.write("""invalid template""")
    +
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +        manager.confirm.side_effect = ["y"]
    +        manager.prompt.side_effect = [
    +            1,
    +            "localhost",
    +            5432,
    +            "test_username",
    +            "test_password",
    +            "test_db",
    +            "test_schema",
    +            4,
    +        ]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +
    +        run_dbt(["init"])
    +
    +        manager.assert_has_calls(
    +            [
    +                call.confirm(
    +                    f"The profile test already exists in {os.path.join(project.profiles_dir, 'profiles.yml')}. Continue and overwrite it?"
    +                ),
    +                call.prompt(
    +                    "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number",
    +                    type=click.INT,
    +                ),
    +                call.prompt(
    +                    "host (hostname for the instance)", default=None, hide_input=False, type=None
    +                ),
    +                call.prompt("port", default=5432, hide_input=False, type=click.INT),
    +                call.prompt("user (dev username)", default=None, hide_input=False, type=None),
    +                call.prompt("pass (dev password)", default=None, hide_input=True, type=None),
    +                call.prompt(
    +                    "dbname (default database that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt(
    +                    "schema (default schema that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT),
    +            ]
    +        )
    +
    +        with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == """config:
    +  send_anonymous_usage_stats: false
    +test:
    +  outputs:
    +    dev:
    +      dbname: test_db
    +      host: localhost
    +      pass: test_password
    +      port: 5432
    +      schema: test_schema
    +      threads: 4
    +      type: postgres
    +      user: test_username
    +  target: dev
    +"""
    +            )
    +
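+
+# A minimal sketch of the fallback behavior exercised above: the project-local
+# template is used only if it parses as a mapping, otherwise the default
+# template wins. The names here are assumptions for illustration; dbt's actual
+# init code is structured differently.
+import yaml
+
+
+def _load_profile_template(project_template_path, default_template):
+    try:
+        with open(project_template_path) as f:
+            template = yaml.safe_load(f)
+        # "invalid template" parses as a plain string, not a mapping
+        if not isinstance(template, dict):
+            raise ValueError("profile_template.yml must be a mapping")
+        return template
+    except Exception:
+        return default_template
+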
    +
    +class TestInitOutsideOfProjectBase:
    +    @pytest.fixture(scope="class")
    +    def project_name(self, unique_schema):
    +        return f"my_project_{unique_schema}"
    +
    +    @pytest.fixture(scope="class", autouse=True)
    +    def setup(self, project):
    +        # Start by removing the dbt_project.yml so that we're not in an existing project
    +        os.remove(os.path.join(project.project_root, "dbt_project.yml"))
    +
    +
    +class TestInitOutsideOfProject(TestInitOutsideOfProjectBase):
    +    @pytest.fixture(scope="class")
    +    def dbt_profile_data(self, unique_schema):
    +        return {
    +            "config": {"send_anonymous_usage_stats": False},
    +            "test": {
    +                "outputs": {
    +                    "default2": {
    +                        "type": "postgres",
    +                        "threads": 4,
    +                        "host": "localhost",
    +                        "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
    +                        "user": os.getenv("POSTGRES_TEST_USER", "root"),
    +                        "pass": os.getenv("POSTGRES_TEST_PASS", "password"),
    +                        "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
    +                        "schema": unique_schema,
    +                    },
    +                    "noaccess": {
    +                        "type": "postgres",
    +                        "threads": 4,
    +                        "host": "localhost",
    +                        "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
    +                        "user": "noaccess",
    +                        "pass": "password",
    +                        "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
    +                        "schema": unique_schema,
    +                    },
    +                },
    +                "target": "default2",
    +            },
    +        }
    +
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    def test_init_task_outside_of_project(
    +        self, mock_prompt, mock_confirm, mock_get_adapter, project, project_name, unique_schema
    +    ):
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +        manager.prompt.side_effect = [
    +            project_name,
    +            1,
    +            "localhost",
    +            5432,
    +            "test_username",
    +            "test_password",
    +            "test_db",
    +            "test_schema",
    +            4,
    +        ]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +        run_dbt(["init"])
    +
    +        manager.assert_has_calls(
    +            [
    +                call.prompt("Enter a name for your project (letters, digits, underscore)"),
    +                call.prompt(
    +                    "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number",
    +                    type=click.INT,
    +                ),
    +                call.prompt(
    +                    "host (hostname for the instance)", default=None, hide_input=False, type=None
    +                ),
    +                call.prompt("port", default=5432, hide_input=False, type=click.INT),
    +                call.prompt("user (dev username)", default=None, hide_input=False, type=None),
    +                call.prompt("pass (dev password)", default=None, hide_input=True, type=None),
    +                call.prompt(
    +                    "dbname (default database that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt(
    +                    "schema (default schema that dbt will build objects in)",
    +                    default=None,
    +                    hide_input=False,
    +                    type=None,
    +                ),
    +                call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT),
    +            ]
    +        )
    +
    +        with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == f"""config:
    +  send_anonymous_usage_stats: false
    +{project_name}:
    +  outputs:
    +    dev:
    +      dbname: test_db
    +      host: localhost
    +      pass: test_password
    +      port: 5432
    +      schema: test_schema
    +      threads: 4
    +      type: postgres
    +      user: test_username
    +  target: dev
    +test:
    +  outputs:
    +    default2:
    +      dbname: dbt
    +      host: localhost
    +      pass: password
    +      port: 5432
    +      schema: {unique_schema}
    +      threads: 4
    +      type: postgres
    +      user: root
    +    noaccess:
    +      dbname: dbt
    +      host: localhost
    +      pass: password
    +      port: 5432
    +      schema: {unique_schema}
    +      threads: 4
    +      type: postgres
    +      user: noaccess
    +  target: default2
    +"""
    +            )
    +
    +        with open(os.path.join(project.project_root, project_name, "dbt_project.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == f"""
    +# Name your project! Project names should contain only lowercase characters
    +# and underscores. A good package name should reflect your organization's
    +# name or the intended use of these models
    +name: '{project_name}'
    +version: '1.0.0'
    +config-version: 2
    +
    +# This setting configures which "profile" dbt uses for this project.
    +profile: '{project_name}'
    +
    +# These configurations specify where dbt should look for different types of files.
    +# The `model-paths` config, for example, states that models in this project can be
    +# found in the "models/" directory. You probably won't need to change these!
    +model-paths: ["models"]
    +analysis-paths: ["analyses"]
    +test-paths: ["tests"]
    +seed-paths: ["seeds"]
    +macro-paths: ["macros"]
    +snapshot-paths: ["snapshots"]
    +
    +target-path: "target"  # directory which will store compiled SQL files
    +clean-targets:         # directories to be removed by `dbt clean`
    +  - "target"
    +  - "dbt_packages"
    +
    +
    +# Configuring models
    +# Full documentation: https://docs.getdbt.com/docs/configuring-models
    +
    +# In this example config, we tell dbt to build all models in the example/
    +# directory as views. These settings can be overridden in the individual model
    +# files using the `{{{{ config(...) }}}}` macro.
    +models:
    +  {project_name}:
    +    # Config indicated by + and applies to all files under models/example/
    +    example:
    +      +materialized: view
    +"""
    +            )
    +
    +
    +class TestInitInvalidProjectNameCLI(TestInitOutsideOfProjectBase):
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    def test_init_invalid_project_name_cli(
    +        self, mock_prompt, mock_confirm, mock_get_adapter, project_name, project
    +    ):
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +
    +        invalid_name = "name-with-hyphen"
    +        valid_name = project_name
    +        manager.prompt.side_effect = [valid_name]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +
    +        run_dbt(["init", invalid_name, "-s"])
    +        manager.assert_has_calls(
    +            [
    +                call.prompt("Enter a name for your project (letters, digits, underscore)"),
    +            ]
    +        )
    +
    +
    +class TestInitInvalidProjectNamePrompt(TestInitOutsideOfProjectBase):
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    def test_init_invalid_project_name_prompt(
    +        self, mock_prompt, mock_confirm, mock_get_adapter, project_name, project
    +    ):
    +        manager = Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +
    +        invalid_name = "name-with-hyphen"
    +        valid_name = project_name
    +        manager.prompt.side_effect = [invalid_name, valid_name]
    +        mock_get_adapter.return_value = [project.adapter.type()]
    +
    +        run_dbt(["init", "-s"])
    +        manager.assert_has_calls(
    +            [
    +                call.prompt("Enter a name for your project (letters, digits, underscore)"),
    +                call.prompt("Enter a name for your project (letters, digits, underscore)"),
    +            ]
    +        )
    +
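+
+# The two classes above pin down dbt's project-name rule ("letters, digits,
+# underscore"). A minimal sketch of such a check; dbt's actual validation may
+# differ, and the function name is an assumption for illustration:
+import re
+
+
+def _is_valid_project_name(name: str) -> bool:
+    # must start with a letter or underscore; "name-with-hyphen" fails
+    return re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name) is not None
+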
    +
    +class TestInitProvidedProjectNameAndSkipProfileSetup(TestInitOutsideOfProjectBase):
    +    @mock.patch("dbt.task.init._get_adapter_plugin_names")
    +    @mock.patch("click.confirm")
    +    @mock.patch("click.prompt")
    +    def test_init_provided_project_name_and_skip_profile_setup(
    +        self, mock_prompt, mock_confirm, mock_get, project, project_name
    +    ):
    +        manager = mock.Mock()
    +        manager.attach_mock(mock_prompt, "prompt")
    +        manager.attach_mock(mock_confirm, "confirm")
    +        manager.prompt.side_effect = [
    +            1,
    +            "localhost",
    +            5432,
    +            "test_username",
    +            "test_password",
    +            "test_db",
    +            "test_schema",
    +            4,
    +        ]
    +        mock_get.return_value = [project.adapter.type()]
    +
    +        # provide project name through the init command
    +        run_dbt(["init", project_name, "-s"])
    +        manager.assert_not_called()
    +
    +        with open(os.path.join(project.project_root, project_name, "dbt_project.yml"), "r") as f:
    +            assert (
    +                f.read()
    +                == f"""
    +# Name your project! Project names should contain only lowercase characters
    +# and underscores. A good package name should reflect your organization's
    +# name or the intended use of these models
    +name: '{project_name}'
    +version: '1.0.0'
    +config-version: 2
    +
    +# This setting configures which "profile" dbt uses for this project.
    +profile: '{project_name}'
    +
    +# These configurations specify where dbt should look for different types of files.
    +# The `model-paths` config, for example, states that models in this project can be
    +# found in the "models/" directory. You probably won't need to change these!
    +model-paths: ["models"]
    +analysis-paths: ["analyses"]
    +test-paths: ["tests"]
    +seed-paths: ["seeds"]
    +macro-paths: ["macros"]
    +snapshot-paths: ["snapshots"]
    +
    +target-path: "target"  # directory which will store compiled SQL files
    +clean-targets:         # directories to be removed by `dbt clean`
    +  - "target"
    +  - "dbt_packages"
    +
    +
    +# Configuring models
    +# Full documentation: https://docs.getdbt.com/docs/configuring-models
    +
    +# In this example config, we tell dbt to build all models in the example/
    +# directory as views. These settings can be overridden in the individual model
    +# files using the `{{{{ config(...) }}}}` macro.
    +models:
    +  {project_name}:
    +    # Config indicated by + and applies to all files under models/example/
    +    example:
    +      +materialized: view
    +"""
    +            )
    diff --git a/tests/functional/invalid_model_tests/test_invalid_models.py b/tests/functional/invalid_model_tests/test_invalid_models.py
    index 29739dcac20..09db17bc325 100644
    --- a/tests/functional/invalid_model_tests/test_invalid_models.py
    +++ b/tests/functional/invalid_model_tests/test_invalid_models.py
    @@ -1,6 +1,6 @@
     import pytest
     
    -from dbt.exceptions import CompilationException, ParsingException
    +from dbt.exceptions import CompilationError, ParsingError
     
     from dbt.tests.util import (
         run_dbt,
    @@ -129,7 +129,7 @@ def models(self):
             }
     
         def test_view_disabled(self, project):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt(["seed"])
     
             assert "enabled" in str(exc.value)
    @@ -146,7 +146,7 @@ def models(self):
             }
     
         def test_referencing_disabled_model(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt()
     
             assert "which is disabled" in str(exc.value)
    @@ -160,7 +160,7 @@ def models(self):
             return {"models__dependent_on_view.sql": models__dependent_on_view}
     
         def test_models_not_found(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt()
     
             assert "which was not found" in str(exc.value)
    @@ -176,7 +176,7 @@ def models(self):
             return {"models__with_bad_macro.sql": models__with_bad_macro}
     
         def test_with_invalid_macro_call(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
     
             assert "macro 'dbt_macro__some_macro' takes no keyword argument 'invalid'" in str(
    @@ -207,7 +207,7 @@ def project_config_update(self):
             }
     
         def test_postgres_source_disabled(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt()
     
             assert "which is disabled" in str(exc.value)
    @@ -221,7 +221,7 @@ def models(self):
             return {"models__referencing_disabled_source.sql": models__referencing_disabled_source}
     
         def test_source_missing(self, project):
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt()
     
             assert "which was not found" in str(exc.value)
    diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py
    index b0feea50809..fc63e5da5dc 100644
    --- a/tests/functional/logging/test_logging.py
    +++ b/tests/functional/logging/test_logging.py
    @@ -1,6 +1,7 @@
     import pytest
     from dbt.tests.util import run_dbt, get_manifest, read_file
     import json
    +import os
     
     
     my_model_sql = """
    @@ -26,7 +27,8 @@ def test_basic(project, logs_dir):
         assert log_file
         node_start = False
         node_finished = False
    -    for log_line in log_file.split('\n'):
    +    connection_reused_data = []
    +    for log_line in log_file.split("\n"):
             # skip empty lines
             if len(log_line) == 0:
                 continue
    @@ -34,18 +36,31 @@ def test_basic(project, logs_dir):
             if "[debug]" in log_line:
                 continue
             log_dct = json.loads(log_line)
    -        log_event = log_dct['info']['name']
    +        log_data = log_dct["data"]
    +        log_event = log_dct["info"]["name"]
    +        if log_event == "ConnectionReused":
    +            connection_reused_data.append(log_data)
             if log_event == "NodeStart":
                 node_start = True
             if log_event == "NodeFinished":
                 node_finished = True
    +            assert log_data["run_result"]["adapter_response"]
             if node_start and not node_finished:
    -            if log_event == 'NodeExecuting':
    -                assert "node_info" in log_dct
    +            if log_event == "NodeExecuting":
    +                assert "node_info" in log_data
                 if log_event == "JinjaLogDebug":
    -                assert "node_info" in log_dct
    +                assert "node_info" in log_data
                 if log_event == "SQLQuery":
    -                assert "node_info" in log_dct
    +                assert "node_info" in log_data
                 if log_event == "TimingInfoCollected":
    -                assert "node_info" in log_dct
    -                assert "timing_info" in log_dct
    +                assert "node_info" in log_data
    +                assert "timing_info" in log_data
    +
+    # Windows doesn't have the same thread/connection flow, so the ConnectionReused
+    # events don't show up there
    +    if os.name != "nt":
    +        # Verify the ConnectionReused event occurs and has the right data
    +        assert connection_reused_data
    +        for data in connection_reused_data:
    +            assert "conn_name" in data and data["conn_name"]
    +            assert "orig_conn_name" in data and data["orig_conn_name"]
    diff --git a/tests/functional/logging/test_meta_logging.py b/tests/functional/logging/test_meta_logging.py
    new file mode 100644
    index 00000000000..189562bba49
    --- /dev/null
    +++ b/tests/functional/logging/test_meta_logging.py
    @@ -0,0 +1,44 @@
    +import pytest
    +from dbt.tests.util import run_dbt, read_file
    +import json
    +
    +model1 = "select 1 as fun"
    +model2 = '{{ config(meta={"owners": ["team1", "team2"]})}} select 1 as fun'
    +model3 = '{{ config(meta={"key": 1})}} select 1 as fun'
    +
    +
    +@pytest.fixture(scope="class")  # noqa
    +def models():
    +    return {"model1.sql": model1, "model2.sql": model2, "model3.sql": model3}
    +
    +
+# This test checks that node_info in logging events carries each model's meta,
+# with node_info supplied by the log_contextvars context manager
    +def test_meta(project, logs_dir):
    +    run_dbt(["--log-format=json", "run"])
    +
    +    # get log file
    +    log_file = read_file(logs_dir, "dbt.log")
    +    assert log_file
    +
    +    for log_line in log_file.split("\n"):
    +        # skip empty lines
    +        if len(log_line) == 0:
    +            continue
    +        # The adapter logging also shows up, so skip non-json lines
    +        if "[debug]" in log_line:
    +            continue
    +
    +        log_dct = json.loads(log_line)
    +        if "node_info" not in log_dct["data"]:
    +            continue
    +
    +        print(f"--- log_dct: {log_dct}")
    +        node_info = log_dct["data"]["node_info"]
    +        node_path = node_info["node_path"]
    +        if node_path == "model1.sql":
    +            assert node_info["meta"] == {}
    +        elif node_path == "model2.sql":
    +            assert node_info["meta"] == {"owners": "['team1', 'team2']"}
    +        elif node_path == "model3.sql":
    +            assert node_info["meta"] == {"key": "1"}
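+
+
+# For reference, the general pattern that routes per-node data into log lines:
+# a context manager binds the node's info in a ContextVar, and the logging
+# layer reads it back for every event fired inside the block. This is a
+# minimal stdlib sketch of that pattern, not dbt's actual log_contextvars
+# implementation.
+from contextlib import contextmanager
+from contextvars import ContextVar
+
+_node_info: ContextVar = ContextVar("node_info", default=None)
+
+
+@contextmanager
+def _log_contextvars_sketch(node_info):
+    token = _node_info.set(node_info)
+    try:
+        yield
+    finally:
+        _node_info.reset(token)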
    diff --git a/tests/functional/macros/test_macros.py b/tests/functional/macros/test_macros.py
    index 899be2453b1..e7f25acab3a 100644
    --- a/tests/functional/macros/test_macros.py
    +++ b/tests/functional/macros/test_macros.py
    @@ -97,7 +97,7 @@ def macros(self):
             return {"my_macros.sql": macros__no_default_macros}
     
         def test_invalid_macro(self, project):
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 run_dbt()
     
             assert "In dispatch: No macro named 'dispatch_to_nowhere' found" in str(exc.value)
    @@ -213,7 +213,7 @@ def macros(self):
             return {"macro.sql": macros__deprecated_adapter_macro}
     
         def test_invalid_macro(self, project):
    -        with pytest.raises(dbt.exceptions.CompilationException) as exc:
    +        with pytest.raises(dbt.exceptions.CompilationError) as exc:
                 run_dbt()
     
             assert 'The "adapter_macro" macro has been deprecated' in str(exc.value)
    diff --git a/tests/functional/materializations/test_incremental.py b/tests/functional/materializations/test_incremental.py
    index f6ec8b2a3e9..7e8df9ea6f1 100644
    --- a/tests/functional/materializations/test_incremental.py
    +++ b/tests/functional/materializations/test_incremental.py
    @@ -1,6 +1,6 @@
     import pytest
     from dbt.tests.util import run_dbt, get_manifest
    -from dbt.exceptions import RuntimeException
    +from dbt.exceptions import DbtRuntimeError
     from dbt.context.providers import generate_runtime_model_context
     
     
    @@ -43,10 +43,10 @@ def test_basic(project):
         assert type(macro_func).__name__ == "MacroGenerator"
     
         # These two incremental strategies are not valid for Postgres
    -    with pytest.raises(RuntimeException) as excinfo:
    +    with pytest.raises(DbtRuntimeError) as excinfo:
             macro_func = project.adapter.get_incremental_strategy_macro(context, "merge")
         assert "merge" in str(excinfo.value)
     
    -    with pytest.raises(RuntimeException) as excinfo:
    +    with pytest.raises(DbtRuntimeError) as excinfo:
             macro_func = project.adapter.get_incremental_strategy_macro(context, "insert_overwrite")
         assert "insert_overwrite" in str(excinfo.value)
    diff --git a/tests/functional/metrics/fixtures.py b/tests/functional/metrics/fixtures.py
    index e191f609977..8a03cb0d7fa 100644
    --- a/tests/functional/metrics/fixtures.py
    +++ b/tests/functional/metrics/fixtures.py
    @@ -642,3 +642,58 @@
         meta:
             my_meta: 'testing'
     """
    +
    +metric_without_timestamp_or_timegrains_yml = """
    +version: 2
    +
    +metrics:
    +  - name: number_of_people
    +    label: "Number of people"
    +    description: Total count of people
    +    model: "ref('people')"
    +    calculation_method: count
    +    expression: "*"
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: 'testing'
    +"""
    +
    +invalid_metric_without_timestamp_with_time_grains_yml = """
    +version: 2
    +
    +metrics:
    +  - name: number_of_people
    +    label: "Number of people"
    +    description: Total count of people
    +    model: "ref('people')"
    +    time_grains: [day, week, month]
    +    calculation_method: count
    +    expression: "*"
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: 'testing'
    +"""
    +
    +invalid_metric_without_timestamp_with_window_yml = """
    +version: 2
    +
    +metrics:
    +  - name: number_of_people
    +    label: "Number of people"
    +    description: Total count of people
    +    model: "ref('people')"
    +    window:
    +      count: 14
    +      period: day
    +    calculation_method: count
    +    expression: "*"
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: 'testing'
    +"""
    diff --git a/tests/functional/metrics/test_metric_configs.py b/tests/functional/metrics/test_metric_configs.py
    index 88c39e0537d..6ad960ec11f 100644
    --- a/tests/functional/metrics/test_metric_configs.py
    +++ b/tests/functional/metrics/test_metric_configs.py
    @@ -1,7 +1,7 @@
     import pytest
     from hologram import ValidationError
     from dbt.contracts.graph.model_config import MetricConfig
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     from dbt.tests.util import run_dbt, update_config_file, get_manifest
     
     
    @@ -11,7 +11,7 @@
         disabled_metric_level_schema_yml,
         enabled_metric_level_schema_yml,
         models_people_metrics_sql,
    -    invalid_config_metric_yml
    +    invalid_config_metric_yml,
     )
     
     
    @@ -106,7 +106,7 @@ def test_metrics_all_configs(self, project):
             assert config_test_table == pytest.expected_config
     
     
    -# Test CompilationException if a model references a disabled metric
    +# Test CompilationError if a model references a disabled metric
     class TestDisabledMetricRef(MetricConfigTests):
         @pytest.fixture(scope="class")
         def models(self):
    @@ -134,7 +134,7 @@ def test_disabled_metric_ref_model(self, project):
             }
     
             update_config_file(new_enabled_config, project.project_root, "dbt_project.yml")
    -        with pytest.raises(CompilationException):
    +        with pytest.raises(CompilationError):
                 run_dbt(["parse"])
     
     
    diff --git a/tests/functional/metrics/test_metric_helper_functions.py b/tests/functional/metrics/test_metric_helper_functions.py
    index c1b7a3487b6..da9a0046ba4 100644
    --- a/tests/functional/metrics/test_metric_helper_functions.py
    +++ b/tests/functional/metrics/test_metric_helper_functions.py
    @@ -3,10 +3,7 @@
     from dbt.tests.util import run_dbt, get_manifest
     from dbt.contracts.graph.metrics import ResolvedMetricReference
     
    -from tests.functional.metrics.fixtures import (
    -    models_people_sql,
    -    basic_metrics_yml
    -)
    +from tests.functional.metrics.fixtures import models_people_sql, basic_metrics_yml
     
     
     class TestMetricHelperFunctions:
    diff --git a/tests/functional/metrics/test_metrics.py b/tests/functional/metrics/test_metrics.py
    index 37446589cd2..adc55c3b996 100644
    --- a/tests/functional/metrics/test_metrics.py
    +++ b/tests/functional/metrics/test_metrics.py
    @@ -1,7 +1,8 @@
     import pytest
     
     from dbt.tests.util import run_dbt, get_manifest
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
    +
     
     from tests.functional.metrics.fixtures import (
         mock_purchase_data_csv,
    @@ -18,6 +19,9 @@
         invalid_derived_metric_contains_model_yml,
         derived_metric_yml,
         derived_metric_old_attr_names_yml,
    +    metric_without_timestamp_or_timegrains_yml,
    +    invalid_metric_without_timestamp_with_time_grains_yml,
    +    invalid_metric_without_timestamp_with_window_yml,
     )
     
     
    @@ -46,6 +50,33 @@ def test_simple_metric(
             assert metric_ids == expected_metric_ids
     
     
    +class TestSimpleMetricsNoTimestamp:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "people_metrics.yml": metric_without_timestamp_or_timegrains_yml,
    +            "people.sql": models_people_sql,
    +        }
    +
    +    def test_simple_metric_no_timestamp(
    +        self,
    +        project,
    +    ):
    +        # initial run
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        metric_ids = list(manifest.metrics.keys())
    +        expected_metric_ids = [
    +            "metric.test.number_of_people",
    +        ]
    +        assert metric_ids == expected_metric_ids
    +
+        # make sure the metric parsed successfully without a timestamp
    +        metric_test = manifest.metrics["metric.test.number_of_people"]
    +        assert metric_test.timestamp is None
    +
    +
     class TestInvalidRefMetrics:
         @pytest.fixture(scope="class")
         def models(self):
    @@ -54,14 +85,14 @@ def models(self):
                 "people.sql": models_people_sql,
             }
     
    -    # tests that we get a ParsingException with an invalid model ref, where
    +    # tests that we get a ParsingError with an invalid model ref, where
         # the model name does not have quotes
         def test_simple_metric(
             self,
             project,
         ):
             # initial run
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt(["run"])
     
     
    @@ -73,14 +104,14 @@ def models(self):
                 "people.sql": models_people_sql,
             }
     
    -    # tests that we get a ParsingException with an invalid model ref, where
    +    # tests that we get a ParsingError with an invalid model ref, where
         # the model name does not have quotes
         def test_simple_metric(
             self,
             project,
         ):
             # initial run
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt(["run"])
     
     
    @@ -92,13 +123,13 @@ def models(self):
                 "people.sql": models_people_sql,
             }
     
    -    # tests that we get a ParsingException with a missing expression
    +    # tests that we get a ParsingError with a missing expression
         def test_simple_metric(
             self,
             project,
         ):
             # initial run
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt(["run"])
     
     
    @@ -111,7 +142,7 @@ def models(self):
             }
     
         def test_names_with_spaces(self, project):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt(["run"])
             assert "cannot contain spaces" in str(exc.value)
     
    @@ -125,7 +156,7 @@ def models(self):
             }
     
         def test_names_with_special_char(self, project):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt(["run"])
             assert "must contain only letters, numbers and underscores" in str(exc.value)
     
    @@ -139,7 +170,7 @@ def models(self):
             }
     
         def test_names_with_leading_number(self, project):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt(["run"])
             assert "must begin with a letter" in str(exc.value)
     
    @@ -153,7 +184,7 @@ def models(self):
             }
     
         def test_long_name(self, project):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt(["run"])
             assert "cannot contain more than 250 characters" in str(exc.value)
     
    @@ -167,7 +198,7 @@ def models(self):
             }
     
         def test_invalid_derived_metrics(self, project):
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt(["run"])
     
     
    @@ -253,3 +284,41 @@ def models(self):
                 "derived_metric.yml": derived_metric_old_attr_names_yml,
                 "downstream_model.sql": downstream_model_sql,
             }
    +
    +
    +class TestInvalidTimestampTimeGrainsMetrics:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "people_metrics.yml": invalid_metric_without_timestamp_with_time_grains_yml,
    +            "people.sql": models_people_sql,
    +        }
    +
    +    # Tests that we get a ParsingError with an invalid metric definition.
    +    # This metric definition is missing timestamp but HAS a time_grains property
    +    def test_simple_metric(
    +        self,
    +        project,
    +    ):
    +        # initial run
    +        with pytest.raises(ParsingError):
    +            run_dbt(["run"])
    +
    +
    +class TestInvalidTimestampWindowMetrics:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "people_metrics.yml": invalid_metric_without_timestamp_with_window_yml,
    +            "people.sql": models_people_sql,
    +        }
    +
    +    # Tests that we get a ParsingError with an invalid metric definition.
    +    # This metric definition is missing timestamp but HAS a window property
    +    def test_simple_metric(
    +        self,
    +        project,
    +    ):
    +        # initial run
    +        with pytest.raises(ParsingError):
    +            run_dbt(["run"])
    diff --git a/tests/functional/minimal_cli/fixtures.py b/tests/functional/minimal_cli/fixtures.py
    index ac746389c6d..35a9bc97b2b 100644
    --- a/tests/functional/minimal_cli/fixtures.py
    +++ b/tests/functional/minimal_cli/fixtures.py
    @@ -51,7 +51,6 @@
     
     
     class BaseConfigProject:
    -
         @pytest.fixture(scope="class")
         def project_config_update(self):
             return {
    @@ -59,11 +58,7 @@ def project_config_update(self):
                 "profile": "jaffle_shop",
                 "version": "0.1.0",
                 "config-version": 2,
    -            "clean-targets": [
    -                "target",
    -                "dbt_packages",
    -                "logs"
    -            ]
    +            "clean-targets": ["target", "dbt_packages", "logs"],
             }
     
         @pytest.fixture(scope="class")
    @@ -78,23 +73,16 @@ def profiles_config_update(self):
                             "host": "localhost",
                             "user": "root",
                             "port": 5432,
    -                        "password": "password"
    +                        "password": "password",
                         }
                     },
    -                "target": "dev"
    +                "target": "dev",
                 }
             }
     
         @pytest.fixture(scope="class")
         def packages(self):
    -        return {
    -            "packages": [
    -                {
    -                    "package": "dbt-labs/dbt_utils",
    -                    "version": "1.0.0"
    -                }
    -            ]
    -        }
    +        return {"packages": [{"package": "dbt-labs/dbt_utils", "version": "1.0.0"}]}
     
         @pytest.fixture(scope="class")
         def models(self):
    @@ -105,9 +93,7 @@ def models(self):
     
         @pytest.fixture(scope="class")
         def snapshots(self):
    -        return {
    -            "sample_snapshot.sql": snapshots__sample_snapshot
    -        }
    +        return {"sample_snapshot.sql": snapshots__sample_snapshot}
     
         @pytest.fixture(scope="class")
         def seeds(self):
    diff --git a/tests/functional/minimal_cli/test_minimal_cli.py b/tests/functional/minimal_cli/test_minimal_cli.py
    index a87c0c95f93..ae8f40dcfcc 100644
    --- a/tests/functional/minimal_cli/test_minimal_cli.py
    +++ b/tests/functional/minimal_cli/test_minimal_cli.py
    @@ -7,43 +7,44 @@
     
     class TestMinimalCli(BaseConfigProject):
         """Test the minimal/happy-path for the CLI using the Click CliRunner"""
    +
         @pytest.fixture(scope="class")
         def runner(self):
             return CliRunner()
     
         def test_clean(self, runner, project):
    -        result = runner.invoke(cli, ['clean'])
    -        assert 'target' in result.output
    -        assert 'dbt_packages' in result.output
    -        assert 'logs' in result.output
    +        result = runner.invoke(cli, ["clean"])
    +        assert "target" in result.output
    +        assert "dbt_packages" in result.output
    +        assert "logs" in result.output
     
         def test_deps(self, runner, project):
    -        result = runner.invoke(cli, ['deps'])
    -        assert 'dbt-labs/dbt_utils' in result.output
    -        assert '1.0.0' in result.output
    +        result = runner.invoke(cli, ["deps"])
    +        assert "dbt-labs/dbt_utils" in result.output
    +        assert "1.0.0" in result.output
     
         def test_ls(self, runner, project):
    -        runner.invoke(cli, ['deps'])
    -        ls_result = runner.invoke(cli, ['ls'])
    -        assert '1 seed' in ls_result.output
    -        assert '1 model' in ls_result.output
    -        assert '5 tests' in ls_result.output
    -        assert '1 snapshot' in ls_result.output
    +        runner.invoke(cli, ["deps"])
    +        ls_result = runner.invoke(cli, ["ls"])
    +        assert "1 seed" in ls_result.output
    +        assert "1 model" in ls_result.output
    +        assert "5 tests" in ls_result.output
    +        assert "1 snapshot" in ls_result.output
     
         def test_build(self, runner, project):
    -        runner.invoke(cli, ['deps'])
    -        result = runner.invoke(cli, ['build'])
    +        runner.invoke(cli, ["deps"])
    +        result = runner.invoke(cli, ["build"])
             # 1 seed, 1 model, 2 tests
    -        assert 'PASS=4' in result.output
    +        assert "PASS=4" in result.output
             # 2 tests
    -        assert 'ERROR=2' in result.output
    +        assert "ERROR=2" in result.output
             # Singular test
    -        assert 'WARN=1' in result.output
    +        assert "WARN=1" in result.output
             # 1 snapshot
    -        assert 'SKIP=1' in result.output
    +        assert "SKIP=1" in result.output
     
         def test_docs_generate(self, runner, project):
    -        runner.invoke(cli, ['deps'])
    -        result = runner.invoke(cli, ['docs', 'generate'])
    -        assert 'Building catalog' in result.output
    -        assert 'Catalog written' in result.output
    +        runner.invoke(cli, ["deps"])
    +        result = runner.invoke(cli, ["docs", "generate"])
    +        assert "Building catalog" in result.output
    +        assert "Catalog written" in result.output
    diff --git a/tests/functional/partial_parsing/fixtures.py b/tests/functional/partial_parsing/fixtures.py
    new file mode 100644
    index 00000000000..7681b9dcb8c
    --- /dev/null
    +++ b/tests/functional/partial_parsing/fixtures.py
    @@ -0,0 +1,1126 @@
    +local_dependency__dbt_project_yml = """
    +
    +name: 'local_dep'
    +version: '1.0'
    +config-version: 2
    +
    +profile: 'default'
    +
    +model-paths: ["models"]
    +analysis-paths: ["analyses"]
    +test-paths: ["tests"]
    +seed-paths: ["seeds"]
    +macro-paths: ["macros"]
    +
    +require-dbt-version: '>=0.1.0'
    +
    +target-path: "target"  # directory which will store compiled SQL files
    +clean-targets:         # directories to be removed by `dbt clean`
    +    - "target"
    +    - "dbt_packages"
    +
    +
    +seeds:
    +  quote_columns: False
    +
    +"""
    +
    +local_dependency__models__schema_yml = """
    +version: 2
    +sources:
    +  - name: seed_source
    +    schema: "{{ var('schema_override', target.schema) }}"
    +    tables:
    +      - name: "seed"
    +        columns:
    +          - name: id
    +            tests:
    +              - unique
    +
    +"""
    +
    +local_dependency__models__model_to_import_sql = """
    +select * from {{ ref('seed') }}
    +
    +"""
    +
    +local_dependency__macros__dep_macro_sql = """
    +{% macro some_overridden_macro() -%}
    +100
    +{%- endmacro %}
    +
    +"""
    +
    +local_dependency__seeds__seed_csv = """id
    +1
    +"""
    +
    +empty_schema_with_version_yml = """
    +version: 2
    +
    +"""
    +
    +schema_sources5_yml = """
    +version: 2
    +
    +sources:
    +  - name: seed_sources
    +    schema: "{{ target.schema }}"
    +    tables:
    +      - name: raw_customers
    +        columns:
    +          - name: id
    +            tests:
    +              - not_null:
    +                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    +              - unique
    +          - name: first_name
    +          - name: last_name
    +          - name: email
    +
    +seeds:
+  - name: raw_customers
    +    description: "Raw customer data"
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +          - not_null
    +      - name: first_name
    +      - name: last_name
    +      - name: email
    +
    +
    +"""
    +
    +my_macro2_sql = """
    +{% macro do_something(foo2, bar2) %}
    +
    +    select
    +        'foo' as foo2,
    +        'var' as bar2
    +
    +{% endmacro %}
    +
    +"""
    +
    +raw_customers_csv = """id,first_name,last_name,email
    +1,Michael,Perez,mperez0@chronoengine.com
    +2,Shawn,Mccoy,smccoy1@reddit.com
    +3,Kathleen,Payne,kpayne2@cargocollective.com
    +4,Jimmy,Cooper,jcooper3@cargocollective.com
    +5,Katherine,Rice,krice4@typepad.com
    +6,Sarah,Ryan,sryan5@gnu.org
    +7,Martin,Mcdonald,mmcdonald6@opera.com
    +8,Frank,Robinson,frobinson7@wunderground.com
    +9,Jennifer,Franklin,jfranklin8@mail.ru
    +10,Henry,Welch,hwelch9@list-manage.com
    +"""
    +
    +model_three_disabled2_sql = """
+-- Disabled model
    +{{ config(materialized='table', enabled=False) }}
    +
    +with source_data as (
    +
    +    select 1 as id
    +    union all
    +    select null as id
    +
    +)
    +
    +select *
    +from source_data
    +
    +"""
    +
    +schema_sources4_yml = """
    +version: 2
    +
    +sources:
    +  - name: seed_sources
    +    schema: "{{ target.schema }}"
    +    tables:
    +      - name: raw_customers
    +        columns:
    +          - name: id
    +            tests:
    +              - not_null:
    +                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    +              - unique
    +              - every_value_is_blue
    +          - name: first_name
    +          - name: last_name
    +          - name: email
    +
    +seeds:
    +  - name: raw_customers
    +    description: "Raw customer data"
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +          - not_null
    +      - name: first_name
    +      - name: last_name
    +      - name: email
    +
    +
    +"""
    +
    +env_var_schema_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      config:
    +        materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
    +
    +"""
    +
    +my_test_sql = """
    +select
    +   * from {{ ref('customers') }} where first_name = '{{ macro_something() }}'
    +
    +"""
    +
    +empty_schema_yml = """
    +
    +"""
    +
    +schema_models_c_yml = """
    +version: 2
    +
    +sources:
    +  - name: seed_source
    +    description: "This is a source override"
    +    overrides: local_dep
    +    schema: "{{ var('schema_override', target.schema) }}"
    +    tables:
    +      - name: "seed"
    +        columns:
    +          - name: id
    +            tests:
    +              - unique
    +              - not_null
    +
    +"""
    +
    +env_var_sources_yml = """
    +version: 2
    +sources:
    +  - name: seed_sources
    +    schema: "{{ target.schema }}"
    +    database: "{{ env_var('ENV_VAR_DATABASE') }}"
    +    tables:
    +      - name: raw_customers
    +        columns:
    +          - name: id
    +            tests:
    +              - not_null:
    +                  severity: "{{ env_var('ENV_VAR_SEVERITY') }}"
    +              - unique
    +          - name: first_name
    +          - name: last_name
    +          - name: email
    +
    +
    +
    +"""
    +
    +generic_test_edited_sql = """
    +{% test is_odd(model, column_name) %}
    +
    +with validation as (
    +
    +    select
    +        {{ column_name }} as odd_field2
    +
    +    from {{ model }}
    +
    +),
    +
    +validation_errors as (
    +
    +    select
    +        odd_field2
    +
    +    from validation
+    -- if this is true, then odd_field2 is actually even!
    +    where (odd_field2 % 2) = 0
    +
    +)
    +
    +select *
    +from validation_errors
    +
    +{% endtest %}
    +"""
    +
    +schema_sources1_yml = """
    +version: 2
    +sources:
    +  - name: seed_sources
    +    schema: "{{ target.schema }}"
    +    tables:
    +      - name: raw_customers
    +        columns:
    +          - name: id
    +            tests:
    +              - not_null:
    +                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    +              - unique
    +          - name: first_name
    +          - name: last_name
    +          - name: email
    +
    +
    +
    +"""
    +
    +schema_sources3_yml = """
    +version: 2
    +
    +sources:
    +  - name: seed_sources
    +    schema: "{{ target.schema }}"
    +    tables:
    +      - name: raw_customers
    +        columns:
    +          - name: id
    +            tests:
    +              - not_null:
    +                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    +              - unique
    +          - name: first_name
    +          - name: last_name
    +          - name: email
    +
    +exposures:
    +  - name: proxy_for_dashboard
    +    description: "This is for the XXX dashboard"
    +    type: "dashboard"
    +    owner:
    +      name: "Dashboard Tester"
    +      email: "tester@dashboard.com"
    +    depends_on:
    +      - ref("model_one")
    +      - source("seed_sources", "raw_customers")
    +
    +
    +"""
    +
    +my_analysis_sql = """
    +select * from customers
    +
    +"""
    +
    +schema_sources2_yml = """
    +version: 2
    +
    +sources:
    +  - name: seed_sources
    +    schema: "{{ target.schema }}"
    +    tables:
    +      - name: raw_customers
    +        columns:
    +          - name: id
    +            tests:
    +              - not_null:
    +                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
    +              - unique
    +          - name: first_name
    +          - name: last_name
    +          - name: email
    +
    +exposures:
    +  - name: proxy_for_dashboard
    +    description: "This is for the XXX dashboard"
    +    type: "dashboard"
    +    owner:
    +      name: "Dashboard Tester"
    +      email: "tester@dashboard.com"
    +    depends_on:
    +      - ref("model_one")
    +      - ref("raw_customers")
    +      - source("seed_sources", "raw_customers")
    +
    +
    +"""
    +
    +model_color_sql = """
    +select 'blue' as fun
    +
    +"""
    +
    +my_metric_yml = """
    +version: 2
    +metrics:
    +  - name: new_customers
    +    label: New Customers
    +    model: customers
    +    description: "The number of paid customers who are using the product"
    +    calculation_method: count
    +    expression: user_id
    +    timestamp: signup_date
    +    time_grains: [day, week, month]
    +    dimensions:
    +      - plan
    +      - country
    +    filters:
    +      - field: is_paying
    +        value: True
    +        operator: '='
    +    +meta:
    +        is_okr: True
    +    tags:
    +      - okrs
    +
    +
    +
    +"""
    +
    +env_var_schema2_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      config:
    +        materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
    +      tests:
    +        - check_color:
    +            column_name: fun
    +            color: "env_var('ENV_VAR_COLOR')"
    +
    +
    +"""
    +
    +gsm_override_sql = """
+-- custom macro
    +{% macro generate_schema_name(schema_name, node) %}
    +
    +    {{ schema_name }}_{{ target.schema }}
    +
    +{% endmacro %}
    +
    +"""
    +
    +model_four1_sql = """
    +select * from {{ ref('model_three') }}
    +
    +"""
    +
    +model_one_sql = """
    +select 1 as fun
    +
    +"""
    +
    +env_var_schema3_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      config:
    +        materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
    +      tests:
    +        - check_color:
    +            column_name: fun
    +            color: "env_var('ENV_VAR_COLOR')"
    +
    +exposures:
    +  - name: proxy_for_dashboard
    +    description: "This is for the XXX dashboard"
    +    type: "dashboard"
    +    owner:
    +      name: "{{ env_var('ENV_VAR_OWNER') }}"
    +      email: "tester@dashboard.com"
    +    depends_on:
    +      - ref("model_color")
    +      - source("seed_sources", "raw_customers")
    +
    +"""
    +
    +env_var_metrics_yml = """
    +version: 2
    +
    +metrics:
    +
    +  - model: "ref('people')"
    +    name: number_of_people
    +    description: Total count of people
    +    label: "Number of people"
    +    calculation_method: count
    +    expression: "*"
    +    timestamp: created_at
    +    time_grains: [day, week, month]
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: '{{ env_var("ENV_VAR_METRICS") }}'
    +
    +  - model: "ref('people')"
    +    name: collective_tenure
    +    description: Total number of years of team experience
    +    label: "Collective tenure"
    +    calculation_method: sum
    +    expression: tenure
    +    timestamp: created_at
    +    time_grains: [day]
    +    filters:
    +      - field: loves_dbt
    +        operator: is
    +        value: 'true'
    +
    +"""
    +
    +customers_sql = """
    +with source as (
    +
    +    select * from {{ source('seed_sources', 'raw_customers') }}
    +
    +),
    +
    +renamed as (
    +
    +    select
    +        id as customer_id,
    +        first_name,
    +        last_name,
    +        email
    +
    +    from source
    +
    +)
    +
    +select * from renamed
    +
    +"""
    +
    +model_four2_sql = """
    +select fun from {{ ref('model_one') }}
    +
    +"""
    +
    +env_var_model_sql = """
    +select '{{ env_var('ENV_VAR_TEST') }}' as vartest
    +
    +"""
    +
    +env_var_model_one_sql = """
    +select 'blue' as fun
    +
    +"""
    +
    +custom_schema_tests2_sql = """
    +{% test type_one(model) %}
    +
    +    select * from (
    +
    +        select * from {{ model }}
    +        union all
    +        select * from {{ ref('model_b') }}
    +
    +    ) as Foo
    +
    +{% endtest %}
    +
    +{% test type_two(model) %}
    +
    +    {{ config(severity = "ERROR") }}
    +
    +    select * from {{ model }}
    +
    +{% endtest %}
    +
    +"""
    +
    +metric_model_a_sql = """
    +{%
    +    set metric_list = [
    +        metric('number_of_people'),
    +        metric('collective_tenure')
    +    ]
    +%}
    +
    +{% if not execute %}
    +
    +    {% set metric_names = [] %}
    +    {% for m in metric_list %}
    +        {% do metric_names.append(m.metric_name) %}
    +    {% endfor %}
    +
    +    -- this config does nothing, but it lets us check these values
    +    {{ config(metric_names = metric_names) }}
    +
    +{% endif %}
    +
    +
    +select 1 as fun
    +
    +"""
    +
    +model_b_sql = """
    +select 1 as notfun
    +
    +"""
    +
    +customers2_md = """
    +{% docs customer_table %}
    +
    +LOTS of customer data
    +
    +{% enddocs %}
    +
    +"""
    +
    +custom_schema_tests1_sql = """
    +{% test type_one(model) %}
    +
    +    select * from (
    +
    +        select * from {{ model }}
    +        union all
    +        select * from {{ ref('model_b') }}
    +
    +    ) as Foo
    +
    +{% endtest %}
    +
    +{% test type_two(model) %}
    +
    +    {{ config(severity = "WARN") }}
    +
    +    select * from {{ model }}
    +
    +{% endtest %}
    +
    +"""
    +
    +people_metrics_yml = """
    +version: 2
    +
    +metrics:
    +
    +  - model: "ref('people')"
    +    name: number_of_people
    +    description: Total count of people
    +    label: "Number of people"
    +    calculation_method: count
    +    expression: "*"
    +    timestamp: created_at
    +    time_grains: [day, week, month]
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: 'testing'
    +
    +  - model: "ref('people')"
    +    name: collective_tenure
    +    description: Total number of years of team experience
    +    label: "Collective tenure"
    +    calculation_method: sum
    +    expression: tenure
    +    timestamp: created_at
    +    time_grains: [day]
    +    filters:
    +      - field: loves_dbt
    +        operator: is
    +        value: 'true'
    +
    +"""
    +
    +people_sql = """
    +select 1 as id, 'Drew' as first_name, 'Banin' as last_name, 'yellow' as favorite_color, true as loves_dbt, 5 as tenure, current_timestamp as created_at
    +union all
+select 2 as id, 'Jeremy' as first_name, 'Cohen' as last_name, 'indigo' as favorite_color, true as loves_dbt, 4 as tenure, current_timestamp as created_at
    +
    +"""
    +
    +orders_sql = """
    +select 1 as id, 101 as user_id, 'pending' as status
    +
    +"""
    +
    +model_a_sql = """
    +select 1 as fun
    +
    +"""
    +
    +model_three_disabled_sql = """
    +{{ config(materialized='table', enabled=False) }}
    +
    +with source_data as (
    +
    +    select 1 as id
    +    union all
    +    select null as id
    +
    +)
    +
    +select *
    +from source_data
    +
    +"""
    +
    +models_schema2b_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      description: "The first model"
    +    - name: model_three
    +      description: "The third model"
    +      columns:
    +        - name: id
    +          tests:
    +            - not_null
    +
    +"""
    +
    +env_var_macros_yml = """
    +version: 2
    +macros:
    +    - name: do_something
    +      description: "This is a test macro"
    +      meta:
    +          some_key: "{{ env_var('ENV_VAR_SOME_KEY') }}"
    +
    +
    +"""
    +
    +models_schema4_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      description: "The first model"
    +    - name: model_three
    +      description: "The third model"
    +      config:
    +        enabled: false
    +      columns:
    +        - name: id
    +          tests:
    +            - unique
    +
    +"""
    +
    +model_two_sql = """
    +select 1 as notfun
    +
    +"""
    +
    +generic_test_schema_yml = """
    +version: 2
    +
    +models:
    +  - name: orders
    +    description: "Some order data"
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +          - is_odd
    +
    +"""
    +
    +customers1_md = """
    +{% docs customer_table %}
    +
    +This table contains customer data
    +
    +{% enddocs %}
    +
    +"""
    +
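+# model_three_sql plus a Jinja comment, so the file changes on disk without
+# changing the compiled SQL.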
    +model_three_modified_sql = """
    +{{ config(materialized='table') }}
    +
    +with source_data as (
    +
    +    {#- This is model three #}
    +
    +    select 1 as id
    +    union all
    +    select null as id
    +
    +)
    +
    +select *
    +from source_data
    +
    +"""
    +
    +macros_yml = """
    +version: 2
    +macros:
    +    - name: do_something
    +      description: "This is a test macro"
    +
    +"""
    +
    +test_color_sql = """
    +{% test check_color(model, column_name, color) %}
    +
    +    select *
    +    from {{ model }}
    +    where {{ column_name }} = '{{ color }}'
    +
    +{% endtest %}
    +
    +"""
    +
    +models_schema2_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      description: "The first model"
    +    - name: model_three
    +      description: "The third model"
    +      columns:
    +        - name: id
    +          tests:
    +            - unique
    +
    +"""
    +
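+# Variant of gsm_override_sql (leading marker line changed), used to simulate
+# editing a generate_schema_name override; see TestSkipMacros.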
    +gsm_override2_sql = """
    +- custom macro xxxx
    +{% macro generate_schema_name(schema_name, node) %}
    +
    +    {{ schema_name }}_{{ target.schema }}
    +
    +{% endmacro %}
    +
    +"""
    +
    +models_schema3_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      description: "The first model"
    +    - name: model_three
    +      description: "The third model"
    +      tests:
    +          - unique
    +macros:
    +    - name: do_something
    +      description: "This is a test macro"
    +
    +"""
    +
    +generic_test_sql = """
    +{% test is_odd(model, column_name) %}
    +
    +with validation as (
    +
    +    select
    +        {{ column_name }} as odd_field
    +
    +    from {{ model }}
    +
    +),
    +
    +validation_errors as (
    +
    +    select
    +        odd_field
    +
    +    from validation
    +    -- if this is true, then odd_field is actually even!
    +    where (odd_field % 2) = 0
    +
    +)
    +
    +select *
    +from validation_errors
    +
    +{% endtest %}
    +"""
    +
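+# Schema file whose test enabled config is read from an env var (default True).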
    +env_var_model_test_yml = """
    +version: 2
    +models:
    +  - name: model_color
    +    columns:
    +      - name: fun
    +        tests:
    +          - unique:
    +              enabled: "{{ env_var('ENV_VAR_ENABLED', True) }}"
    +
    +"""
    +
    +model_three_sql = """
    +{{ config(materialized='table') }}
    +
    +with source_data as (
    +
    +    select 1 as id
    +    union all
    +    select null as id
    +
    +)
    +
    +select *
    +from source_data
    +
    +"""
    +
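+# Identical to ref_override_sql below except for the leading marker line, used
+# to simulate editing a ref override; see TestSkipMacros.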
    +ref_override2_sql = """
    +- Macro to override ref xxxx
    +{% macro ref(modelname) %}
    +{% do return(builtins.ref(modelname)) %}
    +{% endmacro %}
    +
    +"""
    +
    +models_schema1_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      description: "The first model"
    +
    +"""
    +
    +macros_schema_yml = """
    +
    +version: 2
    +
    +models:
    +    - name: model_a
    +      tests:
    +        - type_one
    +        - type_two
    +
    +"""
    +
    +my_macro_sql = """
    +{% macro do_something(foo2, bar2) %}
    +
    +    select
    +        '{{ foo2 }}' as foo2,
    +        '{{ bar2 }}' as bar2
    +
    +{% endmacro %}
    +
    +"""
    +
    +snapshot_sql = """
    +{% snapshot orders_snapshot %}
    +
    +{{
    +    config(
    +      target_schema=schema,
    +      strategy='check',
    +      unique_key='id',
    +      check_cols=['status'],
    +    )
    +}}
    +
    +select * from {{ ref('orders') }}
    +
    +{% endsnapshot %}
    +
    +{% snapshot orders2_snapshot %}
    +
    +{{
    +    config(
    +      target_schema=schema,
    +      strategy='check',
    +      unique_key='id',
    +      check_cols=['order_date'],
    +    )
    +}}
    +
    +select * from {{ ref('orders') }}
    +
    +{% endsnapshot %}
    +
    +"""
    +
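+# models_schema4_yml with model_three's enabled config flipped back to true.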
    +models_schema4b_yml = """
    +version: 2
    +
    +models:
    +    - name: model_one
    +      description: "The first model"
    +    - name: model_three
    +      description: "The third model"
    +      config:
    +        enabled: true
    +      columns:
    +        - name: id
    +          tests:
    +            - unique
    +
    +"""
    +
    +test_macro_sql = """
    +{% macro macro_something() %}
    +
    +    {% do return('macro_something') %}
    +
    +{% endmacro %}
    +
    +"""
    +
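+# people_metrics_yml with my_meta changed from 'testing' to 'replaced'.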
    +people_metrics2_yml = """
    +version: 2
    +
    +metrics:
    +
    +  - model: "ref('people')"
    +    name: number_of_people
    +    description: Total count of people
    +    label: "Number of people"
    +    calculation_method: count
    +    expression: "*"
    +    timestamp: created_at
    +    time_grains: [day, week, month]
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: 'replaced'
    +
    +  - model: "ref('people')"
    +    name: collective_tenure
    +    description: Total number of years of team experience
    +    label: "Collective tenure"
    +    calculation_method: sum
    +    expression: tenure
    +    timestamp: created_at
    +    time_grains: [day]
    +    filters:
    +      - field: loves_dbt
    +        operator: is
    +        value: 'true'
    +
    +"""
    +
    +generic_schema_yml = """
    +version: 2
    +
    +models:
    +  - name: orders
    +    description: "Some order data"
    +    columns:
    +      - name: id
    +        tests:
    +          - unique
    +
    +"""
    +
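+# snapshot_sql with a leading line added, to simulate editing the snapshot file.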
    +snapshot2_sql = """
    +- add a comment
    +{% snapshot orders_snapshot %}
    +
    +{{
    +    config(
    +      target_schema=schema,
    +      strategy='check',
    +      unique_key='id',
    +      check_cols=['status'],
    +    )
    +}}
    +
    +select * from {{ ref('orders') }}
    +
    +{% endsnapshot %}
    +
    +{% snapshot orders2_snapshot %}
    +
    +{{
    +    config(
    +      target_schema=schema,
    +      strategy='check',
    +      unique_key='id',
    +      check_cols=['order_date'],
    +    )
    +}}
    +
    +select * from {{ ref('orders') }}
    +
    +{% endsnapshot %}
    +
    +"""
    +
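+# Modified version of sources_tests1_sql (different where clause), used to
+# simulate changing a source test macro.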
    +sources_tests2_sql = """
    +
    +{% test every_value_is_blue(model, column_name) %}
    +
    +    select *
    +    from {{ model }}
    +    where {{ column_name }} != 99
    +
    +{% endtest %}
    +
    +
    +"""
    +
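+# people_metrics2_yml with the collective_tenure metric removed, to test
+# deleting a metric that a model still depends on.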
    +people_metrics3_yml = """
    +version: 2
    +
    +metrics:
    +
    +  - model: "ref('people')"
    +    name: number_of_people
    +    description: Total count of people
    +    label: "Number of people"
    +    calculation_method: count
    +    expression: "*"
    +    timestamp: created_at
    +    time_grains: [day, week, month]
    +    dimensions:
    +      - favorite_color
    +      - loves_dbt
    +    meta:
    +        my_meta: 'replaced'
    +
    +"""
    +
    +ref_override_sql = """
    +- Macro to override ref
    +{% macro ref(modelname) %}
    +{% do return(builtins.ref(modelname)) %}
    +{% endmacro %}
    +
    +"""
    +
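+# test_macro_sql with a different return value, used to simulate changing a
+# macro that a data test depends on.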
    +test_macro2_sql = """
    +{% macro macro_something() %}
    +
    +    {% do return('some_name') %}
    +
    +{% endmacro %}
    +
    +"""
    +
    +env_var_macro_sql = """
    +{% macro do_something(foo2, bar2) %}
    +
    +    select
    +        '{{ foo2 }}' as foo2,
    +        '{{ bar2 }}' as bar2
    +
    +{% endmacro %}
    +
    +"""
    +
    +sources_tests1_sql = """
    +
    +{% test every_value_is_blue(model, column_name) %}
    +
    +    select *
    +    from {{ model }}
    +    where {{ column_name }} = 9999
    +
    +{% endtest %}
    +
    +
    +"""
    diff --git a/tests/functional/partial_parsing/test_partial_parsing.py b/tests/functional/partial_parsing/test_partial_parsing.py
    new file mode 100644
    index 00000000000..f70b2e0f9fa
    --- /dev/null
    +++ b/tests/functional/partial_parsing/test_partial_parsing.py
    @@ -0,0 +1,643 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt, get_manifest, write_file, rm_file, run_dbt_and_capture
    +from dbt.tests.fixtures.project import write_project_files
    +from tests.functional.partial_parsing.fixtures import (
    +    model_one_sql,
    +    model_two_sql,
    +    models_schema1_yml,
    +    models_schema2_yml,
    +    models_schema2b_yml,
    +    model_three_sql,
    +    model_three_modified_sql,
    +    model_four1_sql,
    +    model_four2_sql,
    +    models_schema4_yml,
    +    models_schema4b_yml,
    +    models_schema3_yml,
    +    my_macro_sql,
    +    my_macro2_sql,
    +    macros_yml,
    +    empty_schema_yml,
    +    empty_schema_with_version_yml,
    +    model_three_disabled_sql,
    +    model_three_disabled2_sql,
    +    raw_customers_csv,
    +    customers_sql,
    +    sources_tests1_sql,
    +    schema_sources1_yml,
    +    schema_sources2_yml,
    +    schema_sources3_yml,
    +    schema_sources4_yml,
    +    schema_sources5_yml,
    +    customers1_md,
    +    customers2_md,
    +    test_macro_sql,
    +    my_test_sql,
    +    test_macro2_sql,
    +    my_analysis_sql,
    +    sources_tests2_sql,
    +    local_dependency__dbt_project_yml,
    +    local_dependency__models__schema_yml,
    +    local_dependency__models__model_to_import_sql,
    +    local_dependency__macros__dep_macro_sql,
    +    local_dependency__seeds__seed_csv,
    +    schema_models_c_yml,
    +    model_a_sql,
    +    model_b_sql,
    +    macros_schema_yml,
    +    custom_schema_tests1_sql,
    +    custom_schema_tests2_sql,
    +    ref_override_sql,
    +    ref_override2_sql,
    +    gsm_override_sql,
    +    gsm_override2_sql,
    +    orders_sql,
    +    snapshot_sql,
    +    snapshot2_sql,
    +    generic_schema_yml,
    +    generic_test_sql,
    +    generic_test_schema_yml,
    +    generic_test_edited_sql,
    +)
    +
    +from dbt.exceptions import CompilationError
    +from dbt.contracts.files import ParseFileType
    +from dbt.contracts.results import TestStatus
    +import re
    +import os
    +
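+# DBT_PP_TEST is checked by the parser so that partial-parsing problems surface
+# as errors in these tests instead of silently falling back to a full re-parse.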
    +os.environ["DBT_PP_TEST"] = "true"
    +
    +
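+# Manifest file IDs use OS-normalized paths, so normalize expected paths the
+# same way before comparing.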
    +def normalize(path):
    +    return os.path.normcase(os.path.normpath(path))
    +
    +
    +class TestModels:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +        }
    +
    +    def test_pp_models(self, project):
    +        # initial run
    +        # run_dbt(['clean'])
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
    +        # add a model file
    +        write_file(model_two_sql, project.project_root, "models", "model_two.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +
    +        # add a schema file
    +        write_file(models_schema1_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        assert "model.test.model_one" in manifest.nodes
    +        model_one_node = manifest.nodes["model.test.model_one"]
    +        assert model_one_node.description == "The first model"
    +        assert model_one_node.patch_path == "test://" + normalize("models/schema.yml")
    +
    +        # add a model and a schema file (with a test) at the same time
    +        write_file(models_schema2_yml, project.project_root, "models", "schema.yml")
    +        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "test"], expect_pass=False)
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        project_files = [f for f in manifest.files if f.startswith("test://")]
    +        assert len(project_files) == 4
    +        model_3_file_id = "test://" + normalize("models/model_three.sql")
    +        assert model_3_file_id in manifest.files
    +        model_three_file = manifest.files[model_3_file_id]
    +        assert model_three_file.parse_file_type == ParseFileType.Model
    +        assert type(model_three_file).__name__ == "SourceFile"
    +        model_three_node = manifest.nodes[model_three_file.nodes[0]]
    +        schema_file_id = "test://" + normalize("models/schema.yml")
    +        assert model_three_node.patch_path == schema_file_id
    +        assert model_three_node.description == "The third model"
    +        schema_file = manifest.files[schema_file_id]
    +        assert type(schema_file).__name__ == "SchemaSourceFile"
    +        assert len(schema_file.tests) == 1
    +        tests = schema_file.get_all_test_ids()
    +        assert tests == ["test.test.unique_model_three_id.6776ac8160"]
    +        unique_test_id = tests[0]
    +        assert unique_test_id in manifest.nodes
    +
    +        # modify model sql file, ensure description still there
    +        write_file(model_three_modified_sql, project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        model_id = "model.test.model_three"
    +        assert model_id in manifest.nodes
    +        model_three_node = manifest.nodes[model_id]
    +        assert model_three_node.description == "The third model"
    +
    +        # Change the model 3 test from unique to not_null
    +        write_file(models_schema2b_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "test"], expect_pass=False)
    +        manifest = get_manifest(project.project_root)
    +        schema_file_id = "test://" + normalize("models/schema.yml")
    +        schema_file = manifest.files[schema_file_id]
    +        tests = schema_file.get_all_test_ids()
    +        assert tests == ["test.test.not_null_model_three_id.3162ce0a6f"]
    +        not_null_test_id = tests[0]
+        assert not_null_test_id in manifest.nodes
+        assert unique_test_id not in manifest.nodes
    +        assert len(results) == 1
    +
    +        # go back to previous version of schema file, removing patch, test, and model for model three
    +        write_file(models_schema1_yml, project.project_root, "models", "schema.yml")
    +        rm_file(project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +
    +        # remove schema file, still have 3 models
    +        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
    +        rm_file(project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +        manifest = get_manifest(project.project_root)
    +        schema_file_id = "test://" + normalize("models/schema.yml")
    +        assert schema_file_id not in manifest.files
    +        project_files = [f for f in manifest.files if f.startswith("test://")]
    +        assert len(project_files) == 3
    +
    +        # Put schema file back and remove a model
    +        # referred to in schema file
    +        write_file(models_schema2_yml, project.project_root, "models", "schema.yml")
    +        rm_file(project.project_root, "models", "model_three.sql")
    +        with pytest.raises(CompilationError):
    +            results = run_dbt(["--partial-parse", "--warn-error", "run"])
    +
    +        # Put model back again
    +        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
+        # Add model four that refs model three
    +        write_file(model_four1_sql, project.project_root, "models", "model_four.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 4
    +
    +        # Remove model_three and change model_four to ref model_one
    +        # and change schema file to remove model_three
    +        rm_file(project.project_root, "models", "model_three.sql")
    +        write_file(model_four2_sql, project.project_root, "models", "model_four.sql")
    +        write_file(models_schema1_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Remove model four, put back model three, put back schema file
    +        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
    +        write_file(models_schema2_yml, project.project_root, "models", "schema.yml")
    +        rm_file(project.project_root, "models", "model_four.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # disable model three in the schema file
    +        write_file(models_schema4_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +
    +        # update enabled config to be true for model three in the schema file
    +        write_file(models_schema4b_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # disable model three in the schema file again
    +        write_file(models_schema4_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +
+        # write the schema file with enabled: true again, to check model three gets re-enabled
    +        write_file(models_schema4b_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Add a macro
    +        write_file(my_macro_sql, project.project_root, "macros", "my_macro.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +        manifest = get_manifest(project.project_root)
    +        macro_id = "macro.test.do_something"
    +        assert macro_id in manifest.macros
    +
    +        # Modify the macro
    +        write_file(my_macro2_sql, project.project_root, "macros", "my_macro.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Add a macro patch
    +        write_file(models_schema3_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Remove the macro
    +        rm_file(project.project_root, "macros", "my_macro.sql")
    +        with pytest.raises(CompilationError):
    +            results = run_dbt(["--partial-parse", "--warn-error", "run"])
    +
+        # put back macro file, go back to schema file with no macro
    +        # add separate macro patch schema file
    +        write_file(models_schema2_yml, project.project_root, "models", "schema.yml")
    +        write_file(my_macro_sql, project.project_root, "macros", "my_macro.sql")
    +        write_file(macros_yml, project.project_root, "macros", "macros.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # delete macro and schema file
    +        rm_file(project.project_root, "macros", "my_macro.sql")
    +        rm_file(project.project_root, "macros", "macros.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Add an empty schema file
    +        write_file(empty_schema_yml, project.project_root, "models", "eschema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Add version to empty schema file
    +        write_file(empty_schema_with_version_yml, project.project_root, "models", "eschema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +
    +        # Disable model_three
    +        write_file(model_three_disabled_sql, project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        model_id = "model.test.model_three"
    +        assert model_id in manifest.disabled
    +        assert model_id not in manifest.nodes
    +
    +        # Edit disabled model three
    +        write_file(model_three_disabled2_sql, project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        model_id = "model.test.model_three"
    +        assert model_id in manifest.disabled
    +        assert model_id not in manifest.nodes
    +
    +        # Remove disabled from model three
    +        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +        manifest = get_manifest(project.project_root)
    +        model_id = "model.test.model_three"
    +        assert model_id in manifest.nodes
    +        assert model_id not in manifest.disabled
    +
    +
    +class TestSources:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +        }
    +
    +    def test_pp_sources(self, project):
    +        # initial run
    +        write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv")
    +        write_file(sources_tests1_sql, project.project_root, "macros", "tests.sql")
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
    +        # Partial parse running 'seed'
    +        run_dbt(["--partial-parse", "seed"])
    +        manifest = get_manifest(project.project_root)
    +        seed_file_id = "test://" + normalize("seeds/raw_customers.csv")
    +        assert seed_file_id in manifest.files
    +
    +        # Add another seed file
    +        write_file(raw_customers_csv, project.project_root, "seeds", "more_customers.csv")
    +        run_dbt(["--partial-parse", "run"])
    +        seed_file_id = "test://" + normalize("seeds/more_customers.csv")
    +        manifest = get_manifest(project.project_root)
    +        assert seed_file_id in manifest.files
    +        seed_id = "seed.test.more_customers"
    +        assert seed_id in manifest.nodes
    +
+        # Remove seed file and add a schema file with a source referring to raw_customers
    +        rm_file(project.project_root, "seeds", "more_customers.csv")
    +        write_file(schema_sources1_yml, project.project_root, "models", "sources.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.sources) == 1
    +        file_id = "test://" + normalize("models/sources.yml")
    +        assert file_id in manifest.files
    +
    +        # add a model referring to raw_customers source
    +        write_file(customers_sql, project.project_root, "models", "customers.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +
    +        # remove sources schema file
    +        rm_file(project.project_root, "models", "sources.yml")
    +        with pytest.raises(CompilationError):
    +            results = run_dbt(["--partial-parse", "run"])
    +
    +        # put back sources and add an exposures file
    +        write_file(schema_sources2_yml, project.project_root, "models", "sources.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # remove seed referenced in exposures file
    +        rm_file(project.project_root, "seeds", "raw_customers.csv")
    +        with pytest.raises(CompilationError):
    +            results = run_dbt(["--partial-parse", "run"])
    +
    +        # put back seed and remove depends_on from exposure
    +        write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv")
    +        write_file(schema_sources3_yml, project.project_root, "models", "sources.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # Add seed config with test to schema.yml, remove exposure
    +        write_file(schema_sources4_yml, project.project_root, "models", "sources.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # Change seed name to wrong name
    +        write_file(schema_sources5_yml, project.project_root, "models", "sources.yml")
    +        with pytest.raises(CompilationError):
    +            results = run_dbt(["--partial-parse", "--warn-error", "run"])
    +
    +        # Put back seed name to right name
    +        write_file(schema_sources4_yml, project.project_root, "models", "sources.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # Add docs file customers.md
    +        write_file(customers1_md, project.project_root, "models", "customers.md")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # Change docs file customers.md
    +        write_file(customers2_md, project.project_root, "models", "customers.md")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # Delete docs file
    +        rm_file(project.project_root, "models", "customers.md")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +        # Add a data test
    +        write_file(test_macro_sql, project.project_root, "macros", "test-macro.sql")
    +        write_file(my_test_sql, project.project_root, "tests", "my_test.sql")
    +        results = run_dbt(["--partial-parse", "test"])
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.nodes) == 9
    +        test_id = "test.test.my_test"
    +        assert test_id in manifest.nodes
    +
    +        # Change macro that data test depends on
    +        write_file(test_macro2_sql, project.project_root, "macros", "test-macro.sql")
    +        results = run_dbt(["--partial-parse", "test"])
    +        manifest = get_manifest(project.project_root)
    +
    +        # Add an analysis
    +        write_file(my_analysis_sql, project.project_root, "analyses", "my_analysis.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +
    +        # Remove data test
    +        rm_file(project.project_root, "tests", "my_test.sql")
    +        results = run_dbt(["--partial-parse", "test"])
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.nodes) == 9
    +
    +        # Remove analysis
    +        rm_file(project.project_root, "analyses", "my_analysis.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.nodes) == 8
    +
    +        # Change source test
    +        write_file(sources_tests2_sql, project.project_root, "macros", "tests.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +
    +
    +class TestPartialParsingDependency:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +        }
    +
    +    @pytest.fixture(scope="class", autouse=True)
    +    def setUp(self, project_root):
    +        local_dependency_files = {
    +            "dbt_project.yml": local_dependency__dbt_project_yml,
    +            "models": {
    +                "schema.yml": local_dependency__models__schema_yml,
    +                "model_to_import.sql": local_dependency__models__model_to_import_sql,
    +            },
    +            "macros": {"dep_macro.sql": local_dependency__macros__dep_macro_sql},
    +            "seeds": {"seed.csv": local_dependency__seeds__seed_csv},
    +        }
    +        write_project_files(project_root, "local_dependency", local_dependency_files)
    +
    +    @pytest.fixture(scope="class")
    +    def packages(self):
    +        return {"packages": [{"local": "local_dependency"}]}
    +
    +    def test_parsing_with_dependency(self, project):
    +        run_dbt(["clean"])
    +        run_dbt(["deps"])
    +        run_dbt(["seed"])
    +        run_dbt(["run"])
    +
    +        # Add a source override
    +        write_file(schema_models_c_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.sources) == 1
    +        source_id = "source.local_dep.seed_source.seed"
    +        assert source_id in manifest.sources
    +        # We have 1 root model, 1 local_dep model, 1 local_dep seed, 1 local_dep source test, 2 root source tests
    +        assert len(manifest.nodes) == 5
    +        test_id = "test.local_dep.source_unique_seed_source_seed_id.afa94935ed"
    +        assert test_id in manifest.nodes
    +
    +        # Remove a source override
    +        rm_file(project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.sources) == 1
    +
    +
    +class TestNestedMacros:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_a.sql": model_a_sql,
    +            "model_b.sql": model_b_sql,
    +            "schema.yml": macros_schema_yml,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "custom_schema_tests.sql": custom_schema_tests1_sql,
    +        }
    +
    +    def test_nested_macros(self, project):
    +        results = run_dbt()
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        macro_child_map = manifest.build_macro_child_map()
    +        macro_unique_id = "macro.test.test_type_two"
    +        assert macro_unique_id in macro_child_map
    +
    +        results = run_dbt(["test"], expect_pass=False)
    +        results = sorted(results, key=lambda r: r.node.name)
    +        assert len(results) == 2
    +        # type_one_model_a_
    +        assert results[0].status == TestStatus.Fail
    +        assert re.search(r"union all", results[0].node.compiled_code)
    +        # type_two_model_a_
    +        assert results[1].status == TestStatus.Warn
    +        assert results[1].node.config.severity == "WARN"
    +
    +        write_file(
    +            custom_schema_tests2_sql, project.project_root, "macros", "custom_schema_tests.sql"
    +        )
    +        results = run_dbt(["--partial-parse", "test"], expect_pass=False)
    +        manifest = get_manifest(project.project_root)
    +        test_node_id = "test.test.type_two_model_a_.842bc6c2a7"
    +        assert test_node_id in manifest.nodes
    +        results = sorted(results, key=lambda r: r.node.name)
    +        assert len(results) == 2
    +        # type_two_model_a_
    +        assert results[1].status == TestStatus.Fail
    +        assert results[1].node.config.severity == "ERROR"
    +
    +
    +class TestSkipMacros:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +            "eschema.yml": empty_schema_yml,
    +        }
    +
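+    # Macros that can affect every node (ref overrides, generate_schema_name)
+    # cause dbt to skip partial parsing; every step below asserts that a full
+    # parse happened.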
    +    def test_skip_macros(self, project):
    +        # initial run so we have a msgpack file
    +        # includes empty_schema file for bug #4850
    +        results = run_dbt()
    +
    +        # add a new ref override macro
    +        write_file(ref_override_sql, project.project_root, "macros", "ref_override.sql")
    +        results, log_output = run_dbt_and_capture(["--partial-parse", "run"])
    +        assert "Starting full parse." in log_output
    +
    +        # modify a ref override macro
    +        write_file(ref_override2_sql, project.project_root, "macros", "ref_override.sql")
    +        results, log_output = run_dbt_and_capture(["--partial-parse", "run"])
    +        assert "Starting full parse." in log_output
    +
    +        # remove a ref override macro
    +        rm_file(project.project_root, "macros", "ref_override.sql")
    +        results, log_output = run_dbt_and_capture(["--partial-parse", "run"])
    +        assert "Starting full parse." in log_output
    +
    +        # custom generate_schema_name macro
    +        write_file(gsm_override_sql, project.project_root, "macros", "gsm_override.sql")
    +        results, log_output = run_dbt_and_capture(["--partial-parse", "run"])
    +        assert "Starting full parse." in log_output
    +
    +        # change generate_schema_name macro
    +        write_file(gsm_override2_sql, project.project_root, "macros", "gsm_override.sql")
    +        results, log_output = run_dbt_and_capture(["--partial-parse", "run"])
    +        assert "Starting full parse." in log_output
    +
    +
    +class TestSnapshots:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "orders.sql": orders_sql,
    +        }
    +
    +    def test_pp_snapshots(self, project):
    +
    +        # initial run
    +        results = run_dbt()
    +        assert len(results) == 1
    +
    +        # add snapshot
    +        write_file(snapshot_sql, project.project_root, "snapshots", "snapshot.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        snapshot_id = "snapshot.test.orders_snapshot"
    +        assert snapshot_id in manifest.nodes
    +        snapshot2_id = "snapshot.test.orders2_snapshot"
    +        assert snapshot2_id in manifest.nodes
    +
    +        # run snapshot
    +        results = run_dbt(["--partial-parse", "snapshot"])
    +        assert len(results) == 2
    +
    +        # modify snapshot
    +        write_file(snapshot2_sql, project.project_root, "snapshots", "snapshot.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 1
    +
    +        # delete snapshot
    +        rm_file(project.project_root, "snapshots", "snapshot.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 1
    +
    +
    +class TestTests:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "orders.sql": orders_sql,
    +            "schema.yml": generic_schema_yml,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def tests(self):
    +        # Make sure "generic" directory is created
    +        return {"generic": {"readme.md": ""}}
    +
    +    def test_pp_generic_tests(self, project):
    +
    +        # initial run
    +        results = run_dbt()
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        expected_nodes = ["model.test.orders", "test.test.unique_orders_id.1360ecc70e"]
    +        assert expected_nodes == list(manifest.nodes.keys())
    +
    +        # add generic test in test-path
    +        write_file(generic_test_sql, project.project_root, "tests", "generic", "generic_test.sql")
    +        write_file(generic_test_schema_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        test_id = "test.test.is_odd_orders_id.82834fdc5b"
    +        assert test_id in manifest.nodes
    +        expected_nodes = [
    +            "model.test.orders",
    +            "test.test.unique_orders_id.1360ecc70e",
    +            "test.test.is_odd_orders_id.82834fdc5b",
    +        ]
    +        assert expected_nodes == list(manifest.nodes.keys())
    +
    +        # edit generic test in test-path
    +        write_file(
    +            generic_test_edited_sql, project.project_root, "tests", "generic", "generic_test.sql"
    +        )
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        test_id = "test.test.is_odd_orders_id.82834fdc5b"
    +        assert test_id in manifest.nodes
    +        expected_nodes = [
    +            "model.test.orders",
    +            "test.test.unique_orders_id.1360ecc70e",
    +            "test.test.is_odd_orders_id.82834fdc5b",
    +        ]
    +        assert expected_nodes == list(manifest.nodes.keys())
    diff --git a/tests/functional/partial_parsing/test_pp_metrics.py b/tests/functional/partial_parsing/test_pp_metrics.py
    new file mode 100644
    index 00000000000..575c5ca613e
    --- /dev/null
    +++ b/tests/functional/partial_parsing/test_pp_metrics.py
    @@ -0,0 +1,73 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt, write_file, get_manifest
    +from tests.functional.partial_parsing.fixtures import (
    +    people_sql,
    +    people_metrics_yml,
    +    people_metrics2_yml,
    +    metric_model_a_sql,
    +    people_metrics3_yml,
    +)
    +
    +from dbt.exceptions import CompilationError
    +
    +
    +class TestMetrics:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "people.sql": people_sql,
    +        }
    +
    +    def test_metrics(self, project):
    +        # initial run
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.nodes) == 1
    +
    +        # Add metrics yaml file
    +        write_file(people_metrics_yml, project.project_root, "models", "people_metrics.yml")
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        assert len(manifest.metrics) == 2
    +        metric_people_id = "metric.test.number_of_people"
    +        metric_tenure_id = "metric.test.collective_tenure"
    +        metric_people = manifest.metrics[metric_people_id]
    +        metric_tenure = manifest.metrics[metric_tenure_id]
    +        expected_meta = {"my_meta": "testing"}
    +        assert metric_people.meta == expected_meta
    +        assert metric_people.refs == [["people"]]
    +        assert metric_tenure.refs == [["people"]]
    +        expected_depends_on_nodes = ["model.test.people"]
    +        assert metric_people.depends_on.nodes == expected_depends_on_nodes
    +
    +        # Change metrics yaml files
    +        write_file(people_metrics2_yml, project.project_root, "models", "people_metrics.yml")
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        metric_people = manifest.metrics[metric_people_id]
    +        expected_meta = {"my_meta": "replaced"}
    +        assert metric_people.meta == expected_meta
    +        expected_depends_on_nodes = ["model.test.people"]
    +        assert metric_people.depends_on.nodes == expected_depends_on_nodes
    +
    +        # Add model referring to metric
    +        write_file(metric_model_a_sql, project.project_root, "models", "metric_model_a.sql")
    +        results = run_dbt(["run"])
    +        manifest = get_manifest(project.project_root)
    +        model_a = manifest.nodes["model.test.metric_model_a"]
    +        expected_depends_on_nodes = [
    +            "metric.test.number_of_people",
    +            "metric.test.collective_tenure",
    +        ]
    +        assert model_a.depends_on.nodes == expected_depends_on_nodes
    +
    +        # Then delete a metric
    +        write_file(people_metrics3_yml, project.project_root, "models", "people_metrics.yml")
    +        with pytest.raises(CompilationError):
    +            # We use "parse" here and not "run" because we're checking that the CompilationError
+            # occurs at parse time, not at compile time
    +            results = run_dbt(["parse"])
    diff --git a/tests/functional/partial_parsing/test_pp_vars.py b/tests/functional/partial_parsing/test_pp_vars.py
    new file mode 100644
    index 00000000000..19b3c7db849
    --- /dev/null
    +++ b/tests/functional/partial_parsing/test_pp_vars.py
    @@ -0,0 +1,386 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt, write_file, run_dbt_and_capture, get_manifest
    +
    +from tests.functional.partial_parsing.fixtures import (
    +    model_color_sql,
    +    env_var_model_sql,
    +    env_var_schema_yml,
    +    env_var_model_one_sql,
    +    raw_customers_csv,
    +    env_var_sources_yml,
    +    test_color_sql,
    +    env_var_schema2_yml,
    +    env_var_schema3_yml,
    +    env_var_macro_sql,
    +    env_var_macros_yml,
    +    env_var_model_test_yml,
    +    people_sql,
    +    env_var_metrics_yml,
    +    model_one_sql,
    +)
    +
    +
    +from dbt.exceptions import ParsingError
    +from dbt.constants import SECRET_ENV_PREFIX
    +import os
    +
    +
    +os.environ["DBT_PP_TEST"] = "true"
    +
    +
    +class TestEnvVars:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_color.sql": model_color_sql,
    +        }
    +
    +    def test_env_vars_models(self, project):
    +
    +        # initial run
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +
+        # add a model with an env_var call, without the env var being set
    +        write_file(env_var_model_sql, project.project_root, "models", "env_var_model.sql")
    +        with pytest.raises(ParsingError):
    +            results = run_dbt(["--partial-parse", "run"])
    +
    +        # set the env var
    +        os.environ["ENV_VAR_TEST"] = "TestingEnvVars"
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {"ENV_VAR_TEST": "TestingEnvVars"}
    +        assert expected_env_vars == manifest.env_vars
    +        model_id = "model.test.env_var_model"
    +        model = manifest.nodes[model_id]
    +        model_created_at = model.created_at
    +
    +        # change the env var
    +        os.environ["ENV_VAR_TEST"] = "second"
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 2
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {"ENV_VAR_TEST": "second"}
    +        assert expected_env_vars == manifest.env_vars
    +        assert model_created_at != manifest.nodes[model_id].created_at
    +
    +        # set an env_var in a schema file
    +        write_file(env_var_schema_yml, project.project_root, "models", "schema.yml")
    +        write_file(env_var_model_one_sql, project.project_root, "models", "model_one.sql")
    +        with pytest.raises(ParsingError):
    +            results = run_dbt(["--partial-parse", "run"])
    +
    +        # actually set the env_var
    +        os.environ["TEST_SCHEMA_VAR"] = "view"
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"}
    +        assert expected_env_vars == manifest.env_vars
    +
    +        # env vars in a source
    +        os.environ["ENV_VAR_DATABASE"] = "dbt"
    +        os.environ["ENV_VAR_SEVERITY"] = "warn"
    +        write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv")
    +        write_file(env_var_sources_yml, project.project_root, "models", "sources.yml")
    +        run_dbt(["--partial-parse", "seed"])
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {
    +            "ENV_VAR_TEST": "second",
    +            "TEST_SCHEMA_VAR": "view",
    +            "ENV_VAR_DATABASE": "dbt",
    +            "ENV_VAR_SEVERITY": "warn",
    +        }
    +        assert expected_env_vars == manifest.env_vars
    +        assert len(manifest.sources) == 1
    +        source_id = "source.test.seed_sources.raw_customers"
    +        source = manifest.sources[source_id]
    +        assert source.database == "dbt"
    +        schema_file = manifest.files[source.file_id]
    +        test_id = "test.test.source_not_null_seed_sources_raw_customers_id.e39ee7bf0d"
    +        test_node = manifest.nodes[test_id]
    +        assert test_node.config.severity == "WARN"
    +
    +        # Change severity env var
    +        os.environ["ENV_VAR_SEVERITY"] = "error"
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {
    +            "ENV_VAR_TEST": "second",
    +            "TEST_SCHEMA_VAR": "view",
    +            "ENV_VAR_DATABASE": "dbt",
    +            "ENV_VAR_SEVERITY": "error",
    +        }
    +        assert expected_env_vars == manifest.env_vars
    +        source_id = "source.test.seed_sources.raw_customers"
    +        source = manifest.sources[source_id]
    +        schema_file = manifest.files[source.file_id]
    +        expected_schema_file_env_vars = {
    +            "sources": {"seed_sources": ["ENV_VAR_DATABASE", "ENV_VAR_SEVERITY"]}
    +        }
    +        assert expected_schema_file_env_vars == schema_file.env_vars
    +        test_node = manifest.nodes[test_id]
    +        assert test_node.config.severity == "ERROR"
    +
    +        # Change database env var
    +        os.environ["ENV_VAR_DATABASE"] = "test_dbt"
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {
    +            "ENV_VAR_TEST": "second",
    +            "TEST_SCHEMA_VAR": "view",
    +            "ENV_VAR_DATABASE": "test_dbt",
    +            "ENV_VAR_SEVERITY": "error",
    +        }
    +        assert expected_env_vars == manifest.env_vars
    +        source = manifest.sources[source_id]
    +        assert source.database == "test_dbt"
    +
    +        # Delete database env var
    +        del os.environ["ENV_VAR_DATABASE"]
    +        with pytest.raises(ParsingError):
    +            results = run_dbt(["--partial-parse", "run"])
    +        os.environ["ENV_VAR_DATABASE"] = "test_dbt"
    +
    +        # Add generic test with test kwarg that's rendered late (no curly brackets)
    +        os.environ["ENV_VAR_DATABASE"] = "dbt"
    +        write_file(test_color_sql, project.project_root, "macros", "test_color.sql")
    +        results = run_dbt(["--partial-parse", "run"])
    +        # Add source test using test_color and an env_var for color
+        write_file(env_var_schema2_yml, project.project_root, "models", "schema.yml")
    +        with pytest.raises(ParsingError):
    +            results = run_dbt(["--partial-parse", "run"])
    +        os.environ["ENV_VAR_COLOR"] = "green"
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        test_color_id = "test.test.check_color_model_one_env_var_ENV_VAR_COLOR___fun.89638de387"
    +        test_node = manifest.nodes[test_color_id]
    +        # kwarg was rendered but not changed (it will be rendered again when compiled)
    +        assert test_node.test_metadata.kwargs["color"] == "env_var('ENV_VAR_COLOR')"
    +        results = run_dbt(["--partial-parse", "test"])
    +
    +        # Add an exposure with an env_var
    +        os.environ["ENV_VAR_OWNER"] = "John Doe"
    +        write_file(env_var_schema3_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {
    +            "ENV_VAR_TEST": "second",
    +            "TEST_SCHEMA_VAR": "view",
    +            "ENV_VAR_DATABASE": "dbt",
    +            "ENV_VAR_SEVERITY": "error",
    +            "ENV_VAR_COLOR": "green",
    +            "ENV_VAR_OWNER": "John Doe",
    +        }
    +        assert expected_env_vars == manifest.env_vars
    +        exposure = list(manifest.exposures.values())[0]
    +        schema_file = manifest.files[exposure.file_id]
    +        expected_sf_env_vars = {
    +            "models": {"model_one": ["TEST_SCHEMA_VAR", "ENV_VAR_COLOR"]},
    +            "exposures": {"proxy_for_dashboard": ["ENV_VAR_OWNER"]},
    +        }
    +        assert expected_sf_env_vars == schema_file.env_vars
    +
    +        # add a macro and a macro schema file
    +        os.environ["ENV_VAR_SOME_KEY"] = "toodles"
    +        write_file(env_var_macro_sql, project.project_root, "macros", "env_var_macro.sql")
    +        write_file(env_var_macros_yml, project.project_root, "macros", "env_var_macros.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        expected_env_vars = {
    +            "ENV_VAR_TEST": "second",
    +            "TEST_SCHEMA_VAR": "view",
    +            "ENV_VAR_DATABASE": "dbt",
    +            "ENV_VAR_SEVERITY": "error",
    +            "ENV_VAR_COLOR": "green",
    +            "ENV_VAR_OWNER": "John Doe",
    +            "ENV_VAR_SOME_KEY": "toodles",
    +        }
    +        assert expected_env_vars == manifest.env_vars
    +        macro_id = "macro.test.do_something"
    +        macro = manifest.macros[macro_id]
    +        assert macro.meta == {"some_key": "toodles"}
    +        # change the env var
    +        os.environ["ENV_VAR_SOME_KEY"] = "dumdedum"
    +        results = run_dbt(["--partial-parse", "run"])
    +        manifest = get_manifest(project.project_root)
    +        macro = manifest.macros[macro_id]
    +        assert macro.meta == {"some_key": "dumdedum"}
    +
    +        # Add a schema file with a test on model_color and env_var in test enabled config
    +        write_file(env_var_model_test_yml, project.project_root, "models", "schema.yml")
    +        results = run_dbt(["--partial-parse", "run"])
    +        assert len(results) == 3
    +        manifest = get_manifest(project.project_root)
    +        model_color = manifest.nodes["model.test.model_color"]
    +        schema_file = manifest.files[model_color.patch_path]
    +        expected_env_vars = {
    +            "models": {
    +                "model_one": ["TEST_SCHEMA_VAR", "ENV_VAR_COLOR"],
    +                "model_color": ["ENV_VAR_ENABLED"],
    +            },
    +            "exposures": {"proxy_for_dashboard": ["ENV_VAR_OWNER"]},
    +        }
    +        assert expected_env_vars == schema_file.env_vars
    +
    +        # Add a metrics file with env_vars
    +        os.environ["ENV_VAR_METRICS"] = "TeStInG"
    +        write_file(people_sql, project.project_root, "models", "people.sql")
    +        write_file(env_var_metrics_yml, project.project_root, "models", "metrics.yml")
    +        results = run_dbt(["run"])
    +        manifest = get_manifest(project.project_root)
    +        assert "ENV_VAR_METRICS" in manifest.env_vars
    +        assert manifest.env_vars["ENV_VAR_METRICS"] == "TeStInG"
    +        metric_node = manifest.metrics["metric.test.number_of_people"]
    +        assert metric_node.meta == {"my_meta": "TeStInG"}
    +
    +        # Change metrics env var
    +        os.environ["ENV_VAR_METRICS"] = "Changed!"
    +        results = run_dbt(["run"])
    +        manifest = get_manifest(project.project_root)
    +        metric_node = manifest.metrics["metric.test.number_of_people"]
    +        assert metric_node.meta == {"my_meta": "Changed!"}
    +
    +        # delete the env vars to cleanup
    +        del os.environ["ENV_VAR_TEST"]
    +        del os.environ["ENV_VAR_SEVERITY"]
    +        del os.environ["ENV_VAR_DATABASE"]
    +        del os.environ["TEST_SCHEMA_VAR"]
    +        del os.environ["ENV_VAR_COLOR"]
    +        del os.environ["ENV_VAR_SOME_KEY"]
    +        del os.environ["ENV_VAR_OWNER"]
    +        del os.environ["ENV_VAR_METRICS"]
    +
    +
    +class TestProjectEnvVars:
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        # Need to set the environment variable here initially because
    +        # the project fixture loads the config.
    +        os.environ["ENV_VAR_NAME"] = "Jane Smith"
    +        return {"models": {"+meta": {"meta_name": "{{ env_var('ENV_VAR_NAME') }}"}}}
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +        }
    +
    +    def test_project_env_vars(self, project):
    +        # Initial run
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        state_check = manifest.state_check
    +        model_id = "model.test.model_one"
    +        model = manifest.nodes[model_id]
    +        assert model.config.meta["meta_name"] == "Jane Smith"
    +        env_vars_hash_checksum = state_check.project_env_vars_hash.checksum
    +
    +        # Change the environment variable
    +        os.environ["ENV_VAR_NAME"] = "Jane Doe"
    +        results = run_dbt(["run"])
    +        assert len(results) == 1
    +        manifest = get_manifest(project.project_root)
    +        model = manifest.nodes[model_id]
    +        assert model.config.meta["meta_name"] == "Jane Doe"
    +        assert env_vars_hash_checksum != manifest.state_check.project_env_vars_hash.checksum
    +
    +        # cleanup
    +        del os.environ["ENV_VAR_NAME"]
    +
    +
    +class TestProfileEnvVars:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def dbt_profile_target(self):
    +        # Need to set these here because the base integration test class
    +        # calls 'load_config' before the tests are run.
    +        # Note: only the specified profile is rendered, so there's no
+        # point in setting env_vars in unused profiles.
    +        os.environ["ENV_VAR_USER"] = "root"
    +        os.environ["ENV_VAR_PASS"] = "password"
    +        return {
    +            "type": "postgres",
    +            "threads": 4,
    +            "host": "localhost",
    +            "port": 5432,
    +            "user": "{{ env_var('ENV_VAR_USER') }}",
    +            "pass": "{{ env_var('ENV_VAR_PASS') }}",
    +            "dbname": "dbt",
    +        }
    +
    +    def test_profile_env_vars(self, project):
    +
    +        # Initial run
    +        os.environ["ENV_VAR_USER"] = "root"
    +        os.environ["ENV_VAR_PASS"] = "password"
    +
    +        results = run_dbt(["run"])
    +        manifest = get_manifest(project.project_root)
    +        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
    +
+        # Change env_vars; the user doesn't exist, so this should fail
    +        os.environ["ENV_VAR_USER"] = "fake_user"
    +        (results, log_output) = run_dbt_and_capture(["run"], expect_pass=False)
    +        assert "env vars used in profiles.yml have changed" in log_output
    +        manifest = get_manifest(project.project_root)
    +        assert env_vars_checksum != manifest.state_check.profile_env_vars_hash.checksum
    +
    +
    +class TestProfileSecretEnvVars:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "model_one.sql": model_one_sql,
    +        }
    +
    +    @property
    +    def dbt_profile_target(self):
    +        # Need to set these here because the base integration test class
    +        # calls 'load_config' before the tests are run.
    +        # Note: only the specified profile is rendered, so there's no
    +        # point in setting env_vars in unused profiles.
    +
    +        # The user is secret and the password is not. Postgres on macOS doesn't care if the
    +        # password changes, so we have to change the user. Related: https://github.com/dbt-labs/dbt-core/pull/4250
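    +        # SECRET_ENV_PREFIX is "DBT_ENV_SECRET_"; dbt leaves vars with this
    +        # prefix out of the profile env-var hash checked below.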
    +        os.environ[SECRET_ENV_PREFIX + "USER"] = "root"
    +        os.environ["ENV_VAR_PASS"] = "password"
    +        return {
    +            "type": "postgres",
    +            "threads": 4,
    +            "host": "localhost",
    +            "port": 5432,
    +            "user": "{{ env_var('DBT_ENV_SECRET_USER') }}",
    +            "pass": "{{ env_var('ENV_VAR_PASS') }}",
    +            "dbname": "dbt",
    +        }
    +
    +    def test_profile_secret_env_vars(self, project):
    +
    +        # Initial run
    +        os.environ[SECRET_ENV_PREFIX + "USER"] = "root"
    +        os.environ["ENV_VAR_PASS"] = "password"
    +
    +        results = run_dbt(["run"])
    +        manifest = get_manifest(project.project_root)
    +        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
    +
    +        # Change a secret var; it shouldn't register, because we never save secrets.
    +        os.environ[SECRET_ENV_PREFIX + "USER"] = "fake_user"
    +        # We just want to see whether the manifest has included
    +        # the secret in the hash of environment variables.
    +        (results, log_output) = run_dbt_and_capture(["run"], expect_pass=True)
    +        # I020 is the event code for "env vars used in profiles.yml have changed"
    +        assert not ("I020" in log_output)
    +        manifest = get_manifest(project.project_root)
    +        assert env_vars_checksum == manifest.state_check.profile_env_vars_hash.checksum
    diff --git a/test/integration/060_persist_docs_tests/models/schema.yml b/tests/functional/persist_docs_tests/fixtures.py
    similarity index 62%
    rename from test/integration/060_persist_docs_tests/models/schema.yml
    rename to tests/functional/persist_docs_tests/fixtures.py
    index 5a909162456..f7179bb1ab5 100644
    --- a/test/integration/060_persist_docs_tests/models/schema.yml
    +++ b/tests/functional/persist_docs_tests/fixtures.py
    @@ -1,3 +1,63 @@
    +_MODELS__VIEW = """
    +{{ config(materialized='view') }}
    +select 2 as id, 'Bob' as name
    +"""
    +
    +_MODELS__NO_DOCS_MODEL = """
    +select 1 as id, 'Alice' as name
    +"""
    +
    +_DOCS__MY_FUN_DOCS = """
    +{% docs my_fun_doc %}
    +name Column description "with double quotes"
    +and with 'single  quotes' as well as other;
    +'''abc123'''
    +reserved -- characters
    +--
    +/* comment */
    +Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting
    +
    +{% enddocs %}
    +"""
    +
    +_MODELS__TABLE = """
    +{{ config(materialized='table') }}
    +select 1 as id, 'Joe' as name
    +"""
    +
    +
    +_MODELS__MISSING_COLUMN = """
    +{{ config(materialized='table') }}
    +select 1 as id, 'Ed' as name
    +"""
    +
    +_MODELS__MODEL_USING_QUOTE_UTIL = """
    +select 1 as {{ adapter.quote("2id") }}
    +"""
    +
    +_PROPERTIES__QUOTE_MODEL = """
    +version: 2
    +models:
    +  - name: quote_model
    +    description: "model to test column quotes and comments"
    +    columns:
    +      - name: 2id
    +        description: "XXX My description"
    +        quote: true
    +"""
    +
    +_PROPERITES__SCHEMA_MISSING_COL = """
    +version: 2
    +models:
    +  - name: missing_column
    +    columns:
    +      - name: id
    +        description: "test id column description"
    +      - name: column_that_does_not_exist
    +        description: "comment that cannot be created"
    +"""
    +
    +_PROPERTIES__SCHEMA_YML = """
     version: 2
     
     models:
    @@ -68,3 +128,10 @@
             description: |
               Some stuff here and then a call to
               {{ doc('my_fun_doc')}}
    +"""
    +
    +
    +_SEEDS__SEED = """id,name
    +1,Alice
    +2,Bob
    +"""
    diff --git a/tests/functional/persist_docs_tests/test_persist_docs.py b/tests/functional/persist_docs_tests/test_persist_docs.py
    new file mode 100644
    index 00000000000..7ca5dcfabe8
    --- /dev/null
    +++ b/tests/functional/persist_docs_tests/test_persist_docs.py
    @@ -0,0 +1,194 @@
    +import json
    +import os
    +import pytest
    +
    +from dbt.tests.util import (
    +    run_dbt,
    +)
    +
    +from tests.functional.persist_docs_tests.fixtures import (
    +    _DOCS__MY_FUN_DOCS,
    +    _MODELS__MISSING_COLUMN,
    +    _MODELS__MODEL_USING_QUOTE_UTIL,
    +    _MODELS__NO_DOCS_MODEL,
    +    _MODELS__TABLE,
    +    _MODELS__VIEW,
    +    _PROPERTIES__QUOTE_MODEL,
    +    _PROPERTIES__SCHEMA_MISSING_COL,
    +    _PROPERTIES__SCHEMA_YML,
    +    _SEEDS__SEED,
    +)
    +
    +
    +class BasePersistDocsTest:
    +    @pytest.fixture(scope="class", autouse=True)
    +    def setUp(self, project):
    +        run_dbt(["seed"])
    +        run_dbt()
    +
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"seed.csv": _SEEDS__SEED}
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {
    +            "no_docs_model.sql": _MODELS__NO_DOCS_MODEL,
    +            "table_model.sql": _MODELS__TABLE,
    +            "view_model.sql": _MODELS__VIEW,
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def properties(self):
    +        return {
    +            "my_fun_docs.md": _DOCS__MY_FUN_DOCS,
    +            "schema.yml": _PROPERTIES__SCHEMA_YML,
    +        }
    +
    +    def _assert_common_comments(self, *comments):
    +        for comment in comments:
    +            assert '"with double quotes"' in comment
    +            assert """'''abc123'''""" in comment
    +            assert "\n" in comment
    +            assert "Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting" in comment
    +            assert "/* comment */" in comment
    +            if os.name == "nt":
    +                assert "--\r\n" in comment or "--\n" in comment
    +            else:
    +                assert "--\n" in comment
    +
    +    def _assert_has_table_comments(self, table_node):
    +        table_comment = table_node["metadata"]["comment"]
    +        assert table_comment.startswith("Table model description")
    +
    +        table_id_comment = table_node["columns"]["id"]["comment"]
    +        assert table_id_comment.startswith("id Column description")
    +
    +        table_name_comment = table_node["columns"]["name"]["comment"]
    +        assert table_name_comment.startswith("Some stuff here and then a call to")
    +
    +        self._assert_common_comments(table_comment, table_id_comment, table_name_comment)
    +
    +    def _assert_has_view_comments(
    +        self, view_node, has_node_comments=True, has_column_comments=True
    +    ):
    +        view_comment = view_node["metadata"]["comment"]
    +        if has_node_comments:
    +            assert view_comment.startswith("View model description")
    +            self._assert_common_comments(view_comment)
    +        else:
    +            assert view_comment is None
    +
    +        view_id_comment = view_node["columns"]["id"]["comment"]
    +        if has_column_comments:
    +            assert view_id_comment.startswith("id Column description")
    +            self._assert_common_comments(view_id_comment)
    +        else:
    +            assert view_id_comment is None
    +
    +        view_name_comment = view_node["columns"]["name"]["comment"]
    +        assert view_name_comment is None
    +
    +
    +class TestPersistDocs(BasePersistDocsTest):
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "models": {
    +                "test": {
    +                    "+persist_docs": {
    +                        "relation": True,
    +                        "columns": True,
    +                    },
    +                }
    +            }
    +        }
    +
    +    def test_has_comments_pglike(self, project):
    +        run_dbt(["docs", "generate"])
    +        with open("target/catalog.json") as fp:
    +            catalog_data = json.load(fp)
    +        assert "nodes" in catalog_data
    +        assert len(catalog_data["nodes"]) == 4
    +        table_node = catalog_data["nodes"]["model.test.table_model"]
    +        self._assert_has_table_comments(table_node)
    +
    +        view_node = catalog_data["nodes"]["model.test.view_model"]
    +        self._assert_has_view_comments(view_node)
    +
    +        no_docs_node = catalog_data["nodes"]["model.test.no_docs_model"]
    +        self._assert_has_view_comments(
    +            no_docs_node, has_node_comments=False, has_column_comments=False
    +        )
    +
    +
    +class TestPersistDocsColumnMissing(BasePersistDocsTest):
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "models": {
    +                "test": {
    +                    "+persist_docs": {
    +                        "columns": True,
    +                    },
    +                }
    +            }
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"missing_column.sql": _MODELS__MISSING_COLUMN}
    +
    +    @pytest.fixture(scope="class")
    +    def properties(self):
    +        return {"schema.yml": _PROPERITES__SCHEMA_MISSING_COL}
    +
    +    def test_postgres_missing_column(self, project):
    +        run_dbt(["docs", "generate"])
    +        with open("target/catalog.json") as fp:
    +            catalog_data = json.load(fp)
    +        assert "nodes" in catalog_data
    +
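    +        # The schema yml documents a column that doesn't exist; docs should
    +        # still persist for the real columns.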
    +        table_node = catalog_data["nodes"]["model.test.missing_column"]
    +        table_id_comment = table_node["columns"]["id"]["comment"]
    +        assert table_id_comment.startswith("test id column description")
    +
    +
    +class TestPersistDocsColumnComment:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"quote_model.sql": _MODELS__MODEL_USING_QUOTE_UTIL}
    +
    +    @pytest.fixture(scope="class")
    +    def properties(self):
    +        return {"properties.yml": _PROPERTIES__QUOTE_MODEL}
    +
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "models": {
    +                "test": {
    +                    "materialized": "table",
    +                    "+persist_docs": {
    +                        "relation": True,
    +                        "columns": True,
    +                    },
    +                }
    +            }
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def run_has_comments(self, project):
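    +        # Return a callable so the test controls when the runs and assertions execute.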
    +        def fixt():
    +            run_dbt()
    +            run_dbt(["docs", "generate"])
    +            with open("target/catalog.json") as fp:
    +                catalog_data = json.load(fp)
    +            assert "nodes" in catalog_data
    +            assert len(catalog_data["nodes"]) == 1
    +            column_node = catalog_data["nodes"]["model.test.quote_model"]
    +            column_comment = column_node["columns"]["2id"]["comment"]
    +            assert column_comment.startswith("XXX")
    +
    +        return fixt
    +
    +    def test_postgres_comments(self, run_has_comments):
    +        run_has_comments()
    diff --git a/tests/functional/postgres/test_postgres_indexes.py b/tests/functional/postgres/test_postgres_indexes.py
    index 64d61d2df87..143a0888755 100644
    --- a/tests/functional/postgres/test_postgres_indexes.py
    +++ b/tests/functional/postgres/test_postgres_indexes.py
    @@ -70,7 +70,7 @@ def test_incremental(self, project, unique_schema):
                 results = run_dbt(["run", "--models", "incremental"] + additional_argument)
                 assert len(results) == 1
     
    -            indexes = self.get_indexes('incremental', project, unique_schema)
    +            indexes = self.get_indexes("incremental", project, unique_schema)
                 expected = [
                     {"columns": "column_a", "unique": False, "type": "hash"},
                     {"columns": "column_a, column_b", "unique": True, "type": "btree"},
    @@ -78,11 +78,11 @@ def test_incremental(self, project, unique_schema):
                 assert len(indexes) == len(expected)
     
         def test_seed(self, project, unique_schema):
    -        for additional_argument in [[], [], ['--full-refresh']]:
    +        for additional_argument in [[], [], ["--full-refresh"]]:
                 results = run_dbt(["seed"] + additional_argument)
                 assert len(results) == 1
     
    -            indexes = self.get_indexes('seed', project, unique_schema)
    +            indexes = self.get_indexes("seed", project, unique_schema)
                 expected = [
                     {"columns": "country_code", "unique": False, "type": "hash"},
                     {"columns": "country_code, country_name", "unique": True, "type": "btree"},
    @@ -94,7 +94,7 @@ def test_snapshot(self, project, unique_schema):
                 results = run_dbt(["snapshot", "--vars", f"version: {version}"])
                 assert len(results) == 1
     
    -            indexes = self.get_indexes('colors', project, unique_schema)
    +            indexes = self.get_indexes("colors", project, unique_schema)
                 expected = [
                     {"columns": "id", "unique": False, "type": "hash"},
                     {"columns": "id, color", "unique": True, "type": "btree"},
    @@ -130,7 +130,7 @@ def assertCountEqual(self, a, b):
             assert len(a) == len(b)
     
     
    -class TestPostgresInvalidIndex():
    +class TestPostgresInvalidIndex:
         @pytest.fixture(scope="class")
         def models(self):
             return {
    diff --git a/tests/functional/profiles/test_profile_dir.py b/tests/functional/profiles/test_profile_dir.py
    index 7a4c8214a1d..75a30512dcb 100644
    --- a/tests/functional/profiles/test_profile_dir.py
    +++ b/tests/functional/profiles/test_profile_dir.py
    @@ -1,13 +1,17 @@
    -import io
     import os
     import pytest
     import yaml
    -from contextlib import contextmanager, redirect_stdout
    +
    +from contextlib import contextmanager
     from pathlib import Path
    -from typing import List
     
     import dbt.flags as flags
    -from dbt.tests.util import run_dbt, write_file, rm_file
    +
    +from dbt.tests.util import (
    +    run_dbt_and_capture,
    +    write_file,
    +    rm_file,
    +)
     
     
     @pytest.fixture(scope="class")
    @@ -84,16 +88,6 @@ def environ(env):
                     os.environ[key] = value
     
     
    -# Use this if you need to capture the standard out in a test
    -def run_dbt_and_capture_stdout(args: List[str] = None, expect_pass=True):
    -    stringbuf = io.StringIO()
    -    with redirect_stdout(stringbuf):
    -        res = run_dbt(args, expect_pass=expect_pass)
    -    stdout = stringbuf.getvalue()
    -
    -    return res, stdout
    -
    -
     class TestProfiles:
         def dbt_debug(self, project_dir_cli_arg=None, profiles_dir_cli_arg=None):
             # begin with no command-line args or user config (from profiles.yml)
    @@ -106,8 +100,8 @@ def dbt_debug(self, project_dir_cli_arg=None, profiles_dir_cli_arg=None):
             if profiles_dir_cli_arg:
                 command.extend(["--profiles-dir", str(profiles_dir_cli_arg)])
     
    -        # get the output of `dbt debug` regarless of the exit code
    -        return run_dbt_and_capture_stdout(command, expect_pass=None)
    +        # get the output of `dbt debug` regardless of the exit code
    +        return run_dbt_and_capture(command, expect_pass=None)
     
         @pytest.mark.parametrize(
             "project_dir_cli_arg, working_directory",
    diff --git a/tests/functional/ref_override/test_ref_override.py b/tests/functional/ref_override/test_ref_override.py
    new file mode 100644
    index 00000000000..9a6b1def435
    --- /dev/null
    +++ b/tests/functional/ref_override/test_ref_override.py
    @@ -0,0 +1,79 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt, check_relations_equal
    +from dbt.tests.fixtures.project import write_project_files
    +
    +
    +models__ref_override_sql = """
    +select
    +    *
    +from {{ ref('seed_1') }}
    +"""
    +
    +macros__ref_override_macro_sql = """
    +-- Macro to override ref and always return the same result
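    +-- builtins.ref exposes dbt's built-in ref implementation so the override can delegate to it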
    +{% macro ref(modelname) %}
    +{% do return(builtins.ref(modelname).replace_path(identifier='seed_2')) %}
    +{% endmacro %}
    +"""
    +
    +seeds__seed_2_csv = """a,b
    +6,2
    +12,4
    +18,6"""
    +
    +seeds__seed_1_csv = """a,b
    +1,2
    +2,4
    +3,6"""
    +
    +
    +@pytest.fixture(scope="class")
    +def models():
    +    return {"ref_override.sql": models__ref_override_sql}
    +
    +
    +@pytest.fixture(scope="class")
    +def macros():
    +    return {"ref_override_macro.sql": macros__ref_override_macro_sql}
    +
    +
    +@pytest.fixture(scope="class")
    +def seeds():
    +    return {"seed_2.csv": seeds__seed_2_csv, "seed_1.csv": seeds__seed_1_csv}
    +
    +
    +@pytest.fixture(scope="class")
    +def project_files(
    +    project_root,
    +    models,
    +    macros,
    +    seeds,
    +):
    +    write_project_files(project_root, "models", models)
    +    write_project_files(project_root, "macros", macros)
    +    write_project_files(project_root, "seeds", seeds)
    +
    +
    +class TestRefOverride:
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            "config-version": 2,
    +            "seed-paths": ["seeds"],
    +            "macro-paths": ["macros"],
    +            "seeds": {
    +                "quote_columns": False,
    +            },
    +        }
    +
    +    def test_ref_override(
    +        self,
    +        project,
    +    ):
    +        run_dbt(["seed"])
    +        run_dbt(["run"])
    +
    +        # We want it to equal seed_2 and not seed_1. If it's
    +        # still pointing at seed_1 then the override hasn't worked.
    +        check_relations_equal(project.adapter, ["ref_override", "seed_2"])
    diff --git a/tests/functional/relation_names/test_relation_name.py b/tests/functional/relation_names/test_relation_name.py
    index 5d941d96da5..f0c241c9302 100644
    --- a/tests/functional/relation_names/test_relation_name.py
    +++ b/tests/functional/relation_names/test_relation_name.py
    @@ -40,9 +40,13 @@ class TestGeneratedDDLNameRules:
         def setup_class(self):
             self.incremental_filename = "my_name_is_51_characters_incremental_abcdefghijklmn"
             # length is 63
    -        self.max_length_filename = "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789"
    +        self.max_length_filename = (
    +            "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789"
    +        )
             # length is 64
    -        self.over_max_length_filename = "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1"
    +        self.over_max_length_filename = (
    +            "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1"
    +        )
     
             self.filename_for_backup_file = "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0"
     
    @@ -57,14 +61,10 @@ def seeds(self):
         @pytest.fixture(scope="class")
         def models(self):
             return {
    -            f"{self.incremental_filename}.sql":
    -                models__basic_incremental,
    -            f"{self.filename_for_backup_file}.sql":
    -                models__basic_table,
    -            f"{self.max_length_filename}.sql":
    -                models__basic_table,
    -            f"{self.over_max_length_filename}.sql":
    -                models__basic_table,
    +            f"{self.incremental_filename}.sql": models__basic_incremental,
    +            f"{self.filename_for_backup_file}.sql": models__basic_table,
    +            f"{self.max_length_filename}.sql": models__basic_table,
    +            f"{self.over_max_length_filename}.sql": models__basic_table,
             }
     
         @pytest.fixture(scope="class")
    @@ -110,15 +110,17 @@ def test_long_name_passes_when_temp_tables_are_generated(self):
         # 63 characters is the character limit for a table name in a postgres database
         # (assuming compiled without changes from source)
         def test_name_longer_than_63_does_not_build(self):
    -        err_msg = "Relation name 'my_name_is_one_over_max"\
    +        err_msg = (
    +            "Relation name 'my_name_is_one_over_max"
                 "_length_chats_abcdefghijklmnopqrstuvwxyz1' is longer than 63 characters"
    +        )
             res = run_dbt(
                 [
                     "run",
                     "-s",
                     self.over_max_length_filename,
                 ],
    -            expect_pass=False
    +            expect_pass=False,
             )
             assert res[0].status == RunStatus.Error
             assert err_msg in res[0].message
    diff --git a/tests/functional/run_operations/test_run_operations.py b/tests/functional/run_operations/test_run_operations.py
    index f91ef2d8359..68e9fb8c6e0 100644
    --- a/tests/functional/run_operations/test_run_operations.py
    +++ b/tests/functional/run_operations/test_run_operations.py
    @@ -2,15 +2,8 @@
     import pytest
     import yaml
     
    -from dbt.tests.util import (
    -    check_table_does_exist,
    -    run_dbt
    -)
    -from tests.functional.run_operations.fixtures import (
    -    happy_macros_sql,
    -    sad_macros_sql,
    -    model_sql
    -)
    +from dbt.tests.util import check_table_does_exist, run_dbt
    +from tests.functional.run_operations.fixtures import happy_macros_sql, sad_macros_sql, model_sql
     
     
     class TestOperations:
    @@ -20,10 +13,7 @@ def models(self):
     
         @pytest.fixture(scope="class")
         def macros(self):
    -        return {
    -            "happy_macros.sql": happy_macros_sql,
    -            "sad_macros.sql": sad_macros_sql
    -        }
    +        return {"happy_macros.sql": happy_macros_sql, "sad_macros.sql": sad_macros_sql}
     
         @pytest.fixture(scope="class")
         def dbt_profile_data(self, unique_schema):
    @@ -46,59 +36,57 @@ def dbt_profile_data(self, unique_schema):
                             "threads": 4,
                             "host": "localhost",
                             "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
    -                        "user": 'noaccess',
    -                        "pass": 'password',
    +                        "user": "noaccess",
    +                        "pass": "password",
                             "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
    -                        'schema': unique_schema
    -                    }
    +                        "schema": unique_schema,
    +                    },
                     },
                     "target": "default",
                 },
             }
     
         def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs):
    -        args = ['run-operation', macro]
    +        args = ["run-operation", macro]
             if kwargs:
    -            args.extend(('--args', yaml.safe_dump(kwargs)))
    +            args.extend(("--args", yaml.safe_dump(kwargs)))
             if extra_args:
                 args.extend(extra_args)
             return run_dbt(args, expect_pass=expect_pass)
     
         def test_macro_noargs(self, project):
    -        self.run_operation('no_args')
    -        check_table_does_exist(project.adapter, 'no_args')
    +        self.run_operation("no_args")
    +        check_table_does_exist(project.adapter, "no_args")
     
         def test_macro_args(self, project):
    -        self.run_operation('table_name_args', table_name='my_fancy_table')
    -        check_table_does_exist(project.adapter, 'my_fancy_table')
    +        self.run_operation("table_name_args", table_name="my_fancy_table")
    +        check_table_does_exist(project.adapter, "my_fancy_table")
     
         def test_macro_exception(self, project):
    -        self.run_operation('syntax_error', False)
    +        self.run_operation("syntax_error", False)
     
         def test_macro_missing(self, project):
    -        self.run_operation('this_macro_does_not_exist', False)
    +        self.run_operation("this_macro_does_not_exist", False)
     
         def test_cannot_connect(self, project):
    -        self.run_operation('no_args',
    -                           extra_args=['--target', 'noaccess'],
    -                           expect_pass=False)
    +        self.run_operation("no_args", extra_args=["--target", "noaccess"], expect_pass=False)
     
         def test_vacuum(self, project):
    -        run_dbt(['run'])
    +        run_dbt(["run"])
             # this should succeed
    -        self.run_operation('vacuum', table_name='model')
    +        self.run_operation("vacuum", table_name="model")
     
         def test_vacuum_ref(self, project):
    -        run_dbt(['run'])
    +        run_dbt(["run"])
             # this should succeed
    -        self.run_operation('vacuum_ref', ref_target='model')
    +        self.run_operation("vacuum_ref", ref_target="model")
     
         def test_select(self, project):
    -        self.run_operation('select_something', name='world')
    +        self.run_operation("select_something", name="world")
     
         def test_access_graph(self, project):
    -        self.run_operation('log_graph')
    +        self.run_operation("log_graph")
     
         def test_print(self, project):
             # Tests that calling the `print()` macro does not cause an exception
    -        self.run_operation('print_something')
    +        self.run_operation("print_something")
    diff --git a/test/integration/057_run_query_tests/macros/test_pg_array_queries.sql b/tests/functional/run_query/test_types.py
    similarity index 52%
    rename from test/integration/057_run_query_tests/macros/test_pg_array_queries.sql
    rename to tests/functional/run_query/test_types.py
    index f672d777f6f..825d3793895 100644
    --- a/test/integration/057_run_query_tests/macros/test_pg_array_queries.sql
    +++ b/tests/functional/run_query/test_types.py
    @@ -1,4 +1,8 @@
    +import pytest
     
    +from dbt.tests.util import run_dbt
    +
    +macros_sql = """
     {% macro test_array_results() %}
     
         {% set sql %}
    @@ -14,3 +18,16 @@
         {% endif %}
     
     {% endmacro %}
    +"""
    +
    +
    +class TestTypes:
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {
    +            "macros.sql": macros_sql,
    +        }
    +
    +    def test_nested_types(self, project):
    +        result = run_dbt(["run-operation", "test_array_results"])
    +        assert result.success
    diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py
    index 44a6696931b..7b80c5d3eb4 100644
    --- a/tests/functional/schema_tests/test_schema_v2_tests.py
    +++ b/tests/functional/schema_tests/test_schema_v2_tests.py
    @@ -95,7 +95,7 @@
         alt_local_utils__macros__type_timestamp_sql,
         all_quotes_schema__schema_yml,
     )
    -from dbt.exceptions import ParsingException, CompilationException, DuplicateResourceName
    +from dbt.exceptions import ParsingError, CompilationError, DuplicateResourceNameError
     from dbt.contracts.results import TestStatus
     
     
    @@ -410,7 +410,7 @@ def test_malformed_schema_will_break_run(
             self,
             project,
         ):
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 run_dbt()
     
     
    @@ -904,7 +904,7 @@ def test_generic_test_collision(
             project,
         ):
             """These tests collide, since only the configs differ"""
    -        with pytest.raises(DuplicateResourceName) as exc:
    +        with pytest.raises(DuplicateResourceNameError) as exc:
                 run_dbt()
             assert "dbt found two tests with the name" in str(exc.value)
     
    @@ -922,7 +922,7 @@ def test_generic_test_config_custom_macros(
             project,
         ):
             """This test has a reference to a custom macro its configs"""
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt()
             assert "Invalid generic test configuration" in str(exc)
     
    @@ -987,7 +987,7 @@ def test_invalid_schema_file(
             self,
             project,
         ):
    -        with pytest.raises(ParsingException) as exc:
    +        with pytest.raises(ParsingError) as exc:
                 run_dbt()
             assert re.search(r"'models' is not a list", str(exc))
     
    @@ -1003,11 +1003,13 @@ def models(self):
         def test_quoted_schema_file(self, project):
             try:
                 # A schema file consisting entirely of quotes should not be a problem
    -            run_dbt(['parse'])
    +            run_dbt(["parse"])
             except TypeError:
    -            assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568'
    +            assert (
    +                False
    +            ), "`dbt parse` failed with a yaml file that is all comments with the same exception as 3568"
             except Exception:
    -            assert False, '`dbt parse` failed with a yaml file that is all comments'
    +            assert False, "`dbt parse` failed with a yaml file that is all comments"
     
     
     class TestWrongSpecificationBlock:
    diff --git a/tests/functional/severity/test_severity.py b/tests/functional/severity/test_severity.py
    new file mode 100644
    index 00000000000..8a76ef6ac24
    --- /dev/null
    +++ b/tests/functional/severity/test_severity.py
    @@ -0,0 +1,122 @@
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +
    +models__sample_model_sql = """
    +select * from {{ source("raw", "sample_seed") }}
    +"""
    +
    +models__schema_yml = """
    +version: 2
    +sources:
    +  - name: raw
    +    database: "{{ target.database }}"
    +    schema: "{{ target.schema }}"
    +    tables:
    +      - name: sample_seed
    +        columns:
    +          - name: email
    +            tests:
    +              - not_null:
    +                  severity: "{{ 'error' if var('strict', false) else 'warn' }}"
    +models:
    +  - name: sample_model
    +    columns:
    +      - name: email
    +        tests:
    +          - not_null:
    +              severity: "{{ 'error' if var('strict', false) else 'warn' }}"
    +"""
    +
    +seeds__sample_seed_csv = """id,first_name,last_name,email,gender,ip_address,updated_at
    +1,Judith,Kennedy,jkennedy0@phpbb.com,Female,54.60.24.128,2015-12-24 12:19:28
    +2,Arthur,Kelly,akelly1@eepurl.com,Male,62.56.24.215,2015-10-28 16:22:15
    +3,Rachel,Moreno,rmoreno2@msu.edu,Female,31.222.249.23,2016-04-05 02:05:30
    +4,Ralph,Turner,rturner3@hp.com,Male,157.83.76.114,2016-08-08 00:06:51
    +5,Laura,Gonzales,lgonzales4@howstuffworks.com,Female,30.54.105.168,2016-09-01 08:25:38
    +6,Katherine,Lopez,null,Female,169.138.46.89,2016-08-30 18:52:11
    +7,Jeremy,Hamilton,jhamilton6@mozilla.org,Male,231.189.13.133,2016-07-17 02:09:46
    +8,Heather,Rose,hrose7@goodreads.com,Female,87.165.201.65,2015-12-29 22:03:56
    +9,Gregory,Kelly,gkelly8@trellian.com,Male,154.209.99.7,2016-03-24 21:18:16
    +10,Rachel,Lopez,rlopez9@themeforest.net,Female,237.165.82.71,2016-08-20 15:44:49
    +11,Donna,Welch,dwelcha@shutterfly.com,Female,103.33.110.138,2016-02-27 01:41:48
    +12,Russell,Lawrence,rlawrenceb@qq.com,Male,189.115.73.4,2016-06-11 03:07:09
    +13,Michelle,Montgomery,mmontgomeryc@scientificamerican.com,Female,243.220.95.82,2016-06-18 16:27:19
    +14,Walter,Castillo,null,Male,71.159.238.196,2016-10-06 01:55:44
    +15,Robin,Mills,rmillse@vkontakte.ru,Female,172.190.5.50,2016-10-31 11:41:21
    +16,Raymond,Holmes,rholmesf@usgs.gov,Male,148.153.166.95,2016-10-03 08:16:38
    +17,Gary,Bishop,gbishopg@plala.or.jp,Male,161.108.182.13,2016-08-29 19:35:20
    +18,Anna,Riley,arileyh@nasa.gov,Female,253.31.108.22,2015-12-11 04:34:27
    +19,Sarah,Knight,sknighti@foxnews.com,Female,222.220.3.177,2016-09-26 00:49:06
    +20,Phyllis,Fox,pfoxj@creativecommons.org,Female,163.191.232.95,2016-08-21 10:35:19
    +"""
    +
    +
    +tests__sample_test_sql = """
    +{{ config(severity='error' if var('strict', false) else 'warn') }}
    +select * from {{ ref("sample_model") }} where email is null
    +"""
    +
    +
    +@pytest.fixture(scope="class")
    +def models():
    +    return {"sample_model.sql": models__sample_model_sql, "schema.yml": models__schema_yml}
    +
    +
    +@pytest.fixture(scope="class")
    +def seeds():
    +    return {"sample_seed.csv": seeds__sample_seed_csv}
    +
    +
    +@pytest.fixture(scope="class")
    +def tests():
    +    return {"null_email.sql": tests__sample_test_sql}
    +
    +
    +@pytest.fixture(scope="class")
    +def project_config_update():
    +    return {
    +        "config-version": 2,
    +        "seed-paths": ["seeds"],
    +        "test-paths": ["tests"],
    +        "seeds": {
    +            "quote_columns": False,
    +        },
    +    }
    +
    +
    +class TestSeverity:
    +    @pytest.fixture(scope="class", autouse=True)
    +    def seed_and_run(self, project):
    +        run_dbt(["seed"])
    +        run_dbt(["run"])
    +
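    +    # With no --vars override, var('strict', false) is falsy, so the fixtures
    +    # render severity as 'warn': failures are reported but the run passes.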
    +    def test_generic_default(self, project):
    +        results = run_dbt(["test", "--select", "test_type:generic"])
    +        assert len(results) == 2
    +        assert all([r.status == "warn" for r in results])
    +        assert all([r.failures == 2 for r in results])
    +
    +    def test_generic_strict(self, project):
    +        results = run_dbt(
    +            ["test", "--select", "test_type:generic", "--vars", '{"strict": True}'],
    +            expect_pass=False,
    +        )
    +        assert len(results) == 2
    +        assert all([r.status == "fail" for r in results])
    +        assert all([r.failures == 2 for r in results])
    +
    +    def test_singular_default(self, project):
    +        results = run_dbt(["test", "--select", "test_type:singular"])
    +        assert len(results) == 1
    +        assert all([r.status == "warn" for r in results])
    +        assert all([r.failures == 2 for r in results])
    +
    +    def test_singular_strict(self, project):
    +        results = run_dbt(
    +            ["test", "--select", "test_type:singular", "--vars", '{"strict": True}'],
    +            expect_pass=False,
    +        )
    +        assert len(results) == 1
    +        assert all([r.status == "fail" for r in results])
    +        assert all([r.failures == 2 for r in results])
    diff --git a/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py b/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py
    index 33e6b61aebc..dfb51f7992e 100644
    --- a/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py
    +++ b/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py
    @@ -1,6 +1,6 @@
     import pytest
     from dbt.tests.util import run_dbt
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
     from tests.functional.simple_snapshot.fixtures import (
         models__schema_yml,
         models__ref_snapshot_sql,
    @@ -43,7 +43,7 @@ def macros():
     
     
     def test_missing_strategy(project):
    -    with pytest.raises(ParsingException) as exc:
    +    with pytest.raises(ParsingError) as exc:
             run_dbt(["compile"], expect_pass=False)
     
         assert "Snapshots must be configured with a 'strategy'" in str(exc.value)
    diff --git a/tests/functional/source_overrides/test_source_overrides_duplicate_model.py b/tests/functional/source_overrides/test_source_overrides_duplicate_model.py
    index cd35fd6f7c2..e3cdebe4794 100644
    --- a/tests/functional/source_overrides/test_source_overrides_duplicate_model.py
    +++ b/tests/functional/source_overrides/test_source_overrides_duplicate_model.py
    @@ -1,5 +1,5 @@
     import os
    -from dbt.exceptions import CompilationException
    +from dbt.exceptions import CompilationError
     import pytest
     
     from dbt.tests.util import run_dbt
    @@ -56,7 +56,7 @@ def project_config_update(self):
     
         def test_source_duplicate_overrides(self, project):
             run_dbt(["deps"])
    -        with pytest.raises(CompilationException) as exc:
    +        with pytest.raises(CompilationError) as exc:
                 run_dbt(["compile"])
     
             assert "dbt found two schema.yml entries for the same source named" in str(exc.value)
    diff --git a/tests/functional/sources/test_simple_source.py b/tests/functional/sources/test_simple_source.py
    index 0c69f859b6b..cd08647f367 100644
    --- a/tests/functional/sources/test_simple_source.py
    +++ b/tests/functional/sources/test_simple_source.py
    @@ -1,7 +1,7 @@
     import os
     import pytest
     import yaml
    -from dbt.exceptions import ParsingException
    +from dbt.exceptions import ParsingError
     
     from dbt.tests.util import (
         run_dbt,
    @@ -164,7 +164,7 @@ def models(self):
             }
     
         def test_malformed_schema_will_break_run(self, project):
    -        with pytest.raises(ParsingException):
    +        with pytest.raises(ParsingError):
                 self.run_dbt_with_vars(project, ["seed"])
     
     
    diff --git a/tests/functional/sources/test_source_fresher_state.py b/tests/functional/sources/test_source_fresher_state.py
    index 362f9a816c0..3ad69d97e6f 100644
    --- a/tests/functional/sources/test_source_fresher_state.py
    +++ b/tests/functional/sources/test_source_fresher_state.py
    @@ -4,7 +4,7 @@
     import pytest
     from datetime import datetime, timedelta
     
    -from dbt.exceptions import InternalException
    +from dbt.exceptions import DbtInternalError
     
     
     from dbt.tests.util import AnyStringWith, AnyFloat
    @@ -112,7 +112,7 @@ def _assert_freshness_results(self, path, state):
                         "warn_after": {"count": 10, "period": "hour"},
                         "error_after": {"count": 18, "period": "hour"},
                     },
    -                "adapter_response": {},
    +                "adapter_response": {"_message": "SELECT 1", "code": "SELECT", "rows_affected": 1},
                     "thread_id": AnyStringWith("Thread-"),
                     "execution_time": AnyFloat(),
                     "timing": [
    @@ -619,7 +619,7 @@ class TestSourceFresherNoPreviousState(SuccessfulSourceFreshnessTest):
         def test_intentional_failure_no_previous_state(self, project):
             self.run_dbt_with_vars(project, ["run"])
             # TODO add the current and previous but with previous as null
    -        with pytest.raises(InternalException) as excinfo:
    +        with pytest.raises(DbtInternalError) as excinfo:
                 self.run_dbt_with_vars(
                     project,
                     ["run", "-s", "source_status:fresher", "--defer", "--state", "previous_state"],
    @@ -641,7 +641,7 @@ def test_intentional_failure_no_previous_state(self, project):
             copy_to_previous_state()
             assert previous_state_results[0].max_loaded_at is not None
     
    -        with pytest.raises(InternalException) as excinfo:
    +        with pytest.raises(DbtInternalError) as excinfo:
                 self.run_dbt_with_vars(
                     project,
                     ["run", "-s", "source_status:fresher", "--defer", "--state", "previous_state"],
    diff --git a/tests/functional/sources/test_source_freshness.py b/tests/functional/sources/test_source_freshness.py
    index 630f59a0205..e7e1f08ebc5 100644
    --- a/tests/functional/sources/test_source_freshness.py
    +++ b/tests/functional/sources/test_source_freshness.py
    @@ -103,7 +103,7 @@ def _assert_freshness_results(self, path, state):
                         "warn_after": {"count": 10, "period": "hour"},
                         "error_after": {"count": 18, "period": "hour"},
                     },
    -                "adapter_response": {},
    +                "adapter_response": {"_message": "SELECT 1", "code": "SELECT", "rows_affected": 1},
                     "thread_id": AnyStringWith("Thread-"),
                     "execution_time": AnyFloat(),
                     "timing": [
    diff --git a/tests/functional/statements/test_statements.py b/tests/functional/statements/test_statements.py
    index 4b8640b8066..b3d615a2b69 100644
    --- a/tests/functional/statements/test_statements.py
    +++ b/tests/functional/statements/test_statements.py
    @@ -1,11 +1,7 @@
     import pathlib
     import pytest
     
    -from dbt.tests.util import (
    -    run_dbt,
    -    check_relations_equal,
    -    write_file
    -)
    +from dbt.tests.util import run_dbt, check_relations_equal, write_file
     from tests.functional.statements.fixtures import (
         models__statement_actual,
         seeds__statement_actual,
    @@ -19,7 +15,9 @@ def setUp(self, project):
             # put seeds in 'seed' not 'seeds' directory
             (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True)
             write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv")
    -        write_file(seeds__statement_expected, project.project_root, "seed", "statement_expected.csv")
    +        write_file(
    +            seeds__statement_expected, project.project_root, "seed", "statement_expected.csv"
    +        )
     
         @pytest.fixture(scope="class")
         def models(self):
    diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/functional/store_test_failures_tests/test_store_test_failures.py
    index ff26d7d97d3..15527c86bd3 100644
    --- a/tests/functional/store_test_failures_tests/test_store_test_failures.py
    +++ b/tests/functional/store_test_failures_tests/test_store_test_failures.py
    @@ -38,10 +38,8 @@ def seeds(self):
                 "people.csv": seeds__people,
                 "expected_accepted_values.csv": seeds__expected_accepted_values,
                 "expected_failing_test.csv": seeds__expected_failing_test,
    -            "expected_not_null_problematic_model_id.csv":
    -                seeds__expected_not_null_problematic_model_id,
    -            "expected_unique_problematic_model_id.csv":
    -                seeds__expected_unique_problematic_model_id,
    +            "expected_not_null_problematic_model_id.csv": seeds__expected_not_null_problematic_model_id,
    +            "expected_unique_problematic_model_id.csv": seeds__expected_unique_problematic_model_id,
             }
     
         @pytest.fixture(scope="class")
    @@ -59,8 +57,7 @@ def properties(self):
         def models(self):
             return {
                 "fine_model.sql": models__fine_model,
    -            "fine_model_but_with_a_no_good_very_long_name.sql":
    -                models__file_model_but_with_a_no_good_very_long_name,
    +            "fine_model_but_with_a_no_good_very_long_name.sql": models__file_model_but_with_a_no_good_very_long_name,
                 "problematic_model.sql": models__problematic_model,
             }
     
    @@ -71,9 +68,7 @@ def project_config_update(self):
                     "quote_columns": False,
                     "test": self.column_type_overrides(),
                 },
    -            "tests": {
    -                "+schema": TEST_AUDIT_SCHEMA_SUFFIX
    -            }
    +            "tests": {"+schema": TEST_AUDIT_SCHEMA_SUFFIX},
             }
     
         def column_type_overrides(self):
    @@ -87,8 +82,8 @@ def run_tests_store_one_failure(self, project):
                 project.adapter,
                 [
                     f"{self.test_audit_schema}.unique_problematic_model_id",
    -                "expected_unique_problematic_model_id"
    -            ]
    +                "expected_unique_problematic_model_id",
    +            ],
             )
     
         def run_tests_store_failures_and_assert(self, project):
    @@ -98,39 +93,59 @@ def run_tests_store_failures_and_assert(self, project):
     
             # compare test results
             actual = [(r.status, r.failures) for r in results]
    -        expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0),
    -                    ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)]
    +        expected = [
    +            ("pass", 0),
    +            ("pass", 0),
    +            ("pass", 0),
    +            ("pass", 0),
    +            ("fail", 2),
    +            ("fail", 2),
    +            ("fail", 2),
    +            ("fail", 10),
    +        ]
             assert sorted(actual) == sorted(expected)
     
             # compare test results stored in database
    -        check_relations_equal(project.adapter, [
    -            f"{self.test_audit_schema}.failing_test",
    -            "expected_failing_test"
    -        ])
    -        check_relations_equal(project.adapter, [
    -            f"{self.test_audit_schema}.not_null_problematic_model_id",
    -            "expected_not_null_problematic_model_id"
    -        ])
    -        check_relations_equal(project.adapter, [
    -            f"{self.test_audit_schema}.unique_problematic_model_id",
    -            "expected_unique_problematic_model_id"
    -        ])
    -        check_relations_equal(project.adapter, [
    -            f"{self.test_audit_schema}.accepted_values_problemat"
    -            "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628",
    -            "expected_accepted_values"
    -        ])
    +        check_relations_equal(
    +            project.adapter, [f"{self.test_audit_schema}.failing_test", "expected_failing_test"]
    +        )
    +        check_relations_equal(
    +            project.adapter,
    +            [
    +                f"{self.test_audit_schema}.not_null_problematic_model_id",
    +                "expected_not_null_problematic_model_id",
    +            ],
    +        )
    +        check_relations_equal(
    +            project.adapter,
    +            [
    +                f"{self.test_audit_schema}.unique_problematic_model_id",
    +                "expected_unique_problematic_model_id",
    +            ],
    +        )
    +        check_relations_equal(
    +            project.adapter,
    +            [
    +                f"{self.test_audit_schema}.accepted_values_problemat"
    +                "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628",
    +                "expected_accepted_values",
    +            ],
    +        )
     
     
     class TestStoreTestFailures(StoreTestFailuresBase):
         @pytest.fixture(scope="function")
         def clean_up(self, project):
             yield
    -        with project.adapter.connection_named('__test'):
    -            relation = project.adapter.Relation.create(database=project.database, schema=self.test_audit_schema)
    +        with project.adapter.connection_named("__test"):
    +            relation = project.adapter.Relation.create(
    +                database=project.database, schema=self.test_audit_schema
    +            )
                 project.adapter.drop_schema(relation)
     
    -            relation = project.adapter.Relation.create(database=project.database, schema=project.test_schema)
    +            relation = project.adapter.Relation.create(
    +                database=project.database, schema=project.test_schema
    +            )
                 project.adapter.drop_schema(relation)
     
         def column_type_overrides(self):
    diff --git a/tests/functional/test_selection/fixtures.py b/tests/functional/test_selection/fixtures.py
    index ae798edd3fd..48c3f40c62d 100644
    --- a/tests/functional/test_selection/fixtures.py
    +++ b/tests/functional/test_selection/fixtures.py
    @@ -64,7 +64,7 @@
         tags = ['a_or_b']
     ) }}
     
    -select 1 as fun
    +select * from {{ ref('model_b') }}
     """
     
     
    diff --git a/tests/functional/test_selection/test_selection_expansion.py b/tests/functional/test_selection/test_selection_expansion.py
    index b563398e89f..290b8f066ff 100644
    --- a/tests/functional/test_selection/test_selection_expansion.py
    +++ b/tests/functional/test_selection/test_selection_expansion.py
    @@ -184,6 +184,24 @@ def test_model_a_exclude_specific_test_cautious(
             self.list_tests_and_assert(select, exclude, expected, indirect_selection)
             self.run_tests_and_assert(select, exclude, expected, indirect_selection)
     
    +    def test_model_a_exclude_specific_test_buildable(
    +        self,
    +        project,
    +    ):
    +        select = "model_a"
    +        exclude = "unique_model_a_fun"
    +        expected = [
    +            "just_a",
    +            "cf_a_b",
    +            "cf_a_src",
    +            "relationships_model_a_fun__fun__ref_model_b_",
    +            "relationships_model_a_fun__fun__source_my_src_my_tbl_",
    +        ]
    +        indirect_selection = "buildable"
    +
    +        self.list_tests_and_assert(select, exclude, expected, indirect_selection)
    +        self.run_tests_and_assert(select, exclude, expected, indirect_selection)
    +
         def test_only_generic(
             self,
             project,
    @@ -374,6 +392,40 @@ def test_model_a_indirect_selection_eager(
             self.list_tests_and_assert(select, exclude, expected, indirect_selection)
             self.run_tests_and_assert(select, exclude, expected, indirect_selection)
     
    +    def test_model_a_indirect_selection_cautious(
    +        self,
    +        project,
    +    ):
    +        select = "model_a"
    +        exclude = None
    +        expected = [
    +            "just_a",
    +            "unique_model_a_fun",
    +        ]
    +        indirect_selection = "cautious"
    +
    +        self.list_tests_and_assert(select, exclude, expected, indirect_selection)
    +        self.run_tests_and_assert(select, exclude, expected, indirect_selection)
    +
    +    def test_model_a_indirect_selection_buildable(
    +        self,
    +        project,
    +    ):
    +        select = "model_a"
    +        exclude = None
    +        expected = [
    +            "cf_a_b",
    +            "cf_a_src",
    +            "just_a",
    +            "relationships_model_a_fun__fun__ref_model_b_",
    +            "relationships_model_a_fun__fun__source_my_src_my_tbl_",
    +            "unique_model_a_fun",
    +        ]
    +        indirect_selection = "buildable"
    +
    +        self.list_tests_and_assert(select, exclude, expected, indirect_selection)
    +        self.run_tests_and_assert(select, exclude, expected, indirect_selection)
    +
         def test_model_a_indirect_selection_exclude_unique_tests(
             self,
             project,
    @@ -402,16 +454,21 @@ def selectors(self):
                   definition:
                     method: fqn
                     value: model_a
    -            - name: model_a_no_indirect_selection
    +            - name: model_a_cautious_indirect_selection
                   definition:
                     method: fqn
                     value: model_a
                     indirect_selection: "cautious"
    -            - name: model_a_yes_indirect_selection
    +            - name: model_a_eager_indirect_selection
                   definition:
                     method: fqn
                     value: model_a
                     indirect_selection: "eager"
    +            - name: model_a_buildable_indirect_selection
    +              definition:
    +                method: fqn
    +                value: model_a
    +                indirect_selection: "buildable"
             """
     
         def test_selector_model_a_unset_indirect_selection(
    @@ -440,7 +497,7 @@ def test_selector_model_a_unset_indirect_selection(
                 selector_name="model_a_unset_indirect_selection",
             )
     
    -    def test_selector_model_a_no_indirect_selection(
    +    def test_selector_model_a_cautious_indirect_selection(
             self,
             project,
         ):
    @@ -450,16 +507,42 @@ def test_selector_model_a_no_indirect_selection(
                 include=None,
                 exclude=None,
                 expected_tests=expected,
    -            selector_name="model_a_no_indirect_selection",
    +            selector_name="model_a_cautious_indirect_selection",
    +        )
    +        self.run_tests_and_assert(
    +            include=None,
    +            exclude=None,
    +            expected_tests=expected,
    +            selector_name="model_a_cautious_indirect_selection",
    +        )
    +
    +    def test_selector_model_a_eager_indirect_selection(
    +        self,
    +        project,
    +    ):
    +        expected = [
    +            "cf_a_b",
    +            "cf_a_src",
    +            "just_a",
    +            "relationships_model_a_fun__fun__ref_model_b_",
    +            "relationships_model_a_fun__fun__source_my_src_my_tbl_",
    +            "unique_model_a_fun",
    +        ]
    +
    +        self.list_tests_and_assert(
    +            include=None,
    +            exclude=None,
    +            expected_tests=expected,
    +            selector_name="model_a_eager_indirect_selection",
             )
             self.run_tests_and_assert(
                 include=None,
                 exclude=None,
                 expected_tests=expected,
    -            selector_name="model_a_no_indirect_selection",
    +            selector_name="model_a_eager_indirect_selection",
             )
     
    -    def test_selector_model_a_yes_indirect_selection(
    +    def test_selector_model_a_buildable_indirect_selection(
             self,
             project,
         ):
    @@ -476,11 +559,11 @@ def test_selector_model_a_yes_indirect_selection(
                 include=None,
                 exclude=None,
                 expected_tests=expected,
    -            selector_name="model_a_yes_indirect_selection",
    +            selector_name="model_a_buildable_indirect_selection",
             )
             self.run_tests_and_assert(
                 include=None,
                 exclude=None,
                 expected_tests=expected,
    -            selector_name="model_a_yes_indirect_selection",
    +            selector_name="model_a_buildable_indirect_selection",
             )
    diff --git a/tests/functional/timezones/test_timezones.py b/tests/functional/timezones/test_timezones.py
    new file mode 100644
    index 00000000000..7b0135442c8
    --- /dev/null
    +++ b/tests/functional/timezones/test_timezones.py
    @@ -0,0 +1,67 @@
    +import os
    +import pytest
    +from freezegun import freeze_time
    +
    +from dbt.tests.util import run_dbt
    +
    +
    +model_sql = """
    +{{
    +    config(
    +        materialized='table'
    +    )
    +}}
    +
    +select
    +    '{{ run_started_at.astimezone(modules.pytz.timezone("America/New_York")) }}' as run_started_at_est,
    +    '{{ run_started_at }}' as run_started_at_utc
    +"""
    +
    +
    +class TestTimezones:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"timezones.sql": model_sql}
    +
    +    @pytest.fixture(scope="class")
    +    def dbt_profile_data(self, unique_schema):
    +        return {
    +            "test": {
    +                "outputs": {
    +                    "dev": {
    +                        "type": "postgres",
    +                        "threads": 1,
    +                        "host": "localhost",
    +                        "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
    +                        "user": os.getenv("POSTGRES_TEST_USER", "root"),
    +                        "pass": os.getenv("POSTGRES_TEST_PASS", "password"),
    +                        "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
    +                        "schema": unique_schema,
    +                    },
    +                },
    +                "target": "dev",
    +            }
    +        }
    +
    +    @pytest.fixture(scope="class")
    +    def query(self, project):
    +        return """
    +            select
    +              run_started_at_est,
    +              run_started_at_utc
    +            from {schema}.timezones
    +        """.format(
    +            schema=project.test_schema
    +        )
    +
    +    @freeze_time("2022-01-01 03:00:00", tz_offset=0)
    +    def test_run_started_at(self, project, query):
    +        results = run_dbt(["run"])
    +
    +        assert len(results) == 1
    +
    +        result = project.run_sql(query, fetch="all")[0]
    +        est, utc = result
    +
    +        assert utc == "2022-01-01 03:00:00+00:00"
    +        assert est == "2021-12-31 22:00:00-05:00"
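    The expected strings in test_run_started_at follow from ordinary pytz arithmetic; a standalone check of the same conversion (pytz assumed installed, as dbt exposes it to Jinja via modules.pytz):

        from datetime import datetime
        import pytz

        # 03:00 UTC on 2022-01-01 is 22:00 the previous evening in New York (EST, UTC-5).
        run_started_at = datetime(2022, 1, 1, 3, 0, 0, tzinfo=pytz.UTC)
        est = run_started_at.astimezone(pytz.timezone("America/New_York"))
        assert str(run_started_at) == "2022-01-01 03:00:00+00:00"
        assert str(est) == "2021-12-31 22:00:00-05:00"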
    diff --git a/tests/unit/test_cli_flags.py b/tests/unit/test_cli_flags.py
    index d3dedac2390..462f801e2ae 100644
    --- a/tests/unit/test_cli_flags.py
    +++ b/tests/unit/test_cli_flags.py
    @@ -26,23 +26,28 @@ def test_mp_context(self, run_context):
             flags = Flags(run_context)
             assert flags.MP_CONTEXT == get_context("spawn")
     
    -    @pytest.mark.parametrize('param', cli.params)
    +    @pytest.mark.parametrize("param", cli.params)
         def test_cli_group_flags_from_params(self, run_context, param):
             flags = Flags(run_context)
             assert hasattr(flags, param.name.upper())
             assert getattr(flags, param.name.upper()) == run_context.params[param.name.lower()]
     
    -    @pytest.mark.parametrize('do_not_track,expected_anonymous_usage_stats', [
    -        ("1", False),
    -        ("t", False),
    -        ("true", False),
    -        ("y", False),
    -        ("yes", False),
    -        ("false", True),
    -        ("anything", True),
    -        ("2", True),
    -    ])
    -    def test_anonymous_usage_state(self, monkeypatch, run_context, do_not_track, expected_anonymous_usage_stats):
    +    @pytest.mark.parametrize(
    +        "do_not_track,expected_anonymous_usage_stats",
    +        [
    +            ("1", False),
    +            ("t", False),
    +            ("true", False),
    +            ("y", False),
    +            ("yes", False),
    +            ("false", True),
    +            ("anything", True),
    +            ("2", True),
    +        ],
    +    )
    +    def test_anonymous_usage_state(
    +        self, monkeypatch, run_context, do_not_track, expected_anonymous_usage_stats
    +    ):
             monkeypatch.setenv("DO_NOT_TRACK", do_not_track)
     
             flags = Flags(run_context)
    @@ -52,16 +57,16 @@ def test_empty_user_config_uses_default(self, run_context):
             user_config = UserConfig()
     
             flags = Flags(run_context, user_config)
    -        assert flags.USE_COLORS == run_context.params['use_colors']
    +        assert flags.USE_COLORS == run_context.params["use_colors"]
     
         def test_none_user_config_uses_default(self, run_context):
             flags = Flags(run_context, None)
    -        assert flags.USE_COLORS == run_context.params['use_colors']
    +        assert flags.USE_COLORS == run_context.params["use_colors"]
     
         def test_prefer_user_config_to_default(self, run_context):
             user_config = UserConfig(use_colors=False)
             # ensure default value is not the same as user config
    -        assert run_context.params['use_colors'] is not user_config.use_colors
    +        assert run_context.params["use_colors"] is not user_config.use_colors
     
             flags = Flags(run_context, user_config)
             assert flags.USE_COLORS == user_config.use_colors
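    The DO_NOT_TRACK table above pins down the parsing contract: only the five truthy markers disable anonymous usage stats, while anything else, including "false" and "2", leaves them enabled. A minimal predicate written from the parametrized cases rather than from the Flags internals:

        import os

        def anonymous_usage_stats_enabled() -> bool:
            # Disabled only for the recognized truthy markers; any other value
            # of DO_NOT_TRACK (including "false" and "2") keeps tracking on.
            return os.getenv("DO_NOT_TRACK", "").lower() not in ("1", "t", "true", "y", "yes")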
    diff --git a/tests/unit/test_connection_retries.py b/tests/unit/test_connection_retries.py
    index 8b031ce5ab4..9076adb7ef9 100644
    --- a/tests/unit/test_connection_retries.py
    +++ b/tests/unit/test_connection_retries.py
    @@ -1,7 +1,7 @@
     import functools
     import pytest
     from requests.exceptions import RequestException
    -from dbt.exceptions import ConnectionException
    +from dbt.exceptions import ConnectionError
     from dbt.utils import _connection_exception_retry
     
     
    @@ -28,7 +28,7 @@ class TestMaxRetries:
         def test_no_retry(self):
             fn_to_retry = functools.partial(no_success_fn)
     
    -        with pytest.raises(ConnectionException):
    +        with pytest.raises(ConnectionError):
                 _connection_exception_retry(fn_to_retry, 3)
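    The rename from ConnectionException to ConnectionError leaves the helper's contract intact: _connection_exception_retry calls the function, retries on requests' RequestException up to the attempt budget, and surfaces dbt's ConnectionError once the budget is spent. A self-contained sketch of that observable behavior (the builtin ConnectionError stands in here for dbt.exceptions.ConnectionError):

        from requests.exceptions import RequestException

        def retry_sketch(fn, max_attempts):
            # Sketch of the retry contract these tests exercise: keep calling
            # fn on RequestException, then give up with a ConnectionError.
            for _ in range(max_attempts):
                try:
                    return fn()
                except RequestException:
                    continue
            raise ConnectionError("max retries exceeded")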
     
     
    diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py
    new file mode 100644
    index 00000000000..ce80ba3d040
    --- /dev/null
    +++ b/tests/unit/test_deprecations.py
    @@ -0,0 +1,602 @@
    +import argparse
    +import pytest
    +
    +from dbt.internal_deprecations import deprecated
    +import dbt.exceptions
    +from dbt.node_types import NodeType
    +
    +
    +@deprecated(reason="just because", version="1.23.0", suggested_action="Make some updates")
    +def to_be_decorated():
    +    return 5
    +
    +
    +# simple test that the return value is not modified
    +def test_deprecated_func():
    +    assert hasattr(to_be_decorated, "__wrapped__")
    +    assert to_be_decorated() == 5
    +
    +
    +class TestDeprecatedFunctions:
    +    def is_deprecated(self, func):
    +        assert hasattr(func, "__wrapped__")
    +        # TODO: add in log check
    +
    +    def test_warn(self):
    +        self.is_deprecated(dbt.exceptions.warn)
    +
    +
    +class TestDeprecatedExceptionFunctions:
    +    def runFunc(self, func, *args):
    +        return func(*args)
    +
    +    def is_deprecated(self, func):
    +        assert hasattr(func, "__wrapped__")
    +        # TODO: add in log check
    +
    +    def test_missing_config(self):
    +        func = dbt.exceptions.missing_config
    +        exception = dbt.exceptions.MissingConfigError
    +        model = argparse.Namespace()
    +        model.unique_id = ""
    +        name = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, name)
    +
    +    def test_missing_materialization(self):
    +        func = dbt.exceptions.missing_materialization
    +        exception = dbt.exceptions.MissingMaterializationError
    +        model = argparse.Namespace()
    +        model.config = argparse.Namespace()
    +        model.config.materialized = ""
    +        adapter_type = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, adapter_type)
    +
    +    def test_missing_relation(self):
    +        func = dbt.exceptions.missing_relation
    +        exception = dbt.exceptions.MissingRelationError
    +        relation = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(relation)
    +
    +    def test_raise_ambiguous_alias(self):
    +        func = dbt.exceptions.raise_ambiguous_alias
    +        exception = dbt.exceptions.AmbiguousAliasError
    +        node_1 = argparse.Namespace()
    +        node_1.unique_id = ""
    +        node_1.original_file_path = ""
    +        node_2 = argparse.Namespace()
    +        node_2.unique_id = ""
    +        node_2.original_file_path = ""
    +        duped_name = "string"
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(node_1, node_2, duped_name)
    +
    +    def test_raise_ambiguous_catalog_match(self):
    +        func = dbt.exceptions.raise_ambiguous_catalog_match
    +        exception = dbt.exceptions.AmbiguousCatalogMatchError
    +        unique_id = ""
    +        match_1 = {"metadata": {"schema": ""}}
    +        match_2 = {"metadata": {"schema": ""}}
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(unique_id, match_1, match_2)
    +
    +    def test_raise_cache_inconsistent(self):
    +        func = dbt.exceptions.raise_cache_inconsistent
    +        exception = dbt.exceptions.CacheInconsistencyError
    +        msg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(msg)
    +
    +    def test_raise_dataclass_not_dict(self):
    +        func = dbt.exceptions.raise_dataclass_not_dict
    +        exception = dbt.exceptions.DataclassNotDictError
    +        obj = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(obj)
    +
    +    def test_raise_compiler_error(self):
    +        func = dbt.exceptions.raise_compiler_error
    +        exception = dbt.exceptions.CompilationError
    +        msg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(msg)
    +
    +    def test_raise_database_error(self):
    +        func = dbt.exceptions.raise_database_error
    +        exception = dbt.exceptions.DbtDatabaseError
    +        msg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(msg)
    +
    +    def test_raise_dep_not_found(self):
    +        func = dbt.exceptions.raise_dep_not_found
    +        exception = dbt.exceptions.DependencyNotFoundError
    +        node = ""
    +        node_description = ""
    +        required_pkg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(node, node_description, required_pkg)
    +
    +    def test_raise_dependency_error(self):
    +        func = dbt.exceptions.raise_dependency_error
    +        exception = dbt.exceptions.DependencyError
    +        msg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(msg)
    +
    +    def test_raise_duplicate_patch_name(self):
    +        func = dbt.exceptions.raise_duplicate_patch_name
    +        exception = dbt.exceptions.DuplicatePatchPathError
    +        patch_1 = argparse.Namespace()
    +        patch_1.name = ""
    +        patch_1.original_file_path = ""
    +        existing_patch_path = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(patch_1, existing_patch_path)
    +
    +    def test_raise_duplicate_resource_name(self):
    +        func = dbt.exceptions.raise_duplicate_resource_name
    +        exception = dbt.exceptions.DuplicateResourceNameError
    +        node_1 = argparse.Namespace()
    +        node_1.name = ""
    +        node_1.resource_type = NodeType("model")
    +        node_1.column_name = ""
    +        node_1.unique_id = ""
    +        node_1.original_file_path = ""
    +        node_2 = argparse.Namespace()
    +        node_2.name = ""
    +        node_2.resource_type = ""
    +        node_2.unique_id = ""
    +        node_2.original_file_path = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(node_1, node_2)
    +
    +    def test_raise_invalid_property_yml_version(self):
    +        func = dbt.exceptions.raise_invalid_property_yml_version
    +        exception = dbt.exceptions.PropertyYMLError
    +        path = ""
    +        issue = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(path, issue)
    +
    +    def test_raise_not_implemented(self):
    +        func = dbt.exceptions.raise_not_implemented
    +        exception = dbt.exceptions.NotImplementedError
    +        msg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(msg)
    +
    +    def test_relation_wrong_type(self):
    +        func = dbt.exceptions.relation_wrong_type
    +        exception = dbt.exceptions.RelationWrongTypeError
    +
    +        relation = argparse.Namespace()
    +        relation.type = ""
    +        expected_type = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(relation, expected_type)
    +
    +    def test_raise_duplicate_alias(self):
    +        func = dbt.exceptions.raise_duplicate_alias
    +        exception = dbt.exceptions.DuplicateAliasError
    +        kwargs = {"": ""}
    +        aliases = {"": ""}
    +        canonical_key = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(kwargs, aliases, canonical_key)
    +
    +    def test_raise_duplicate_source_patch_name(self):
    +        func = dbt.exceptions.raise_duplicate_source_patch_name
    +        exception = dbt.exceptions.DuplicateSourcePatchNameError
    +        patch_1 = argparse.Namespace()
    +        patch_1.name = ""
    +        patch_1.path = ""
    +        patch_1.overrides = ""
    +        patch_2 = argparse.Namespace()
    +        patch_2.path = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(patch_1, patch_2)
    +
    +    def test_raise_duplicate_macro_patch_name(self):
    +        func = dbt.exceptions.raise_duplicate_macro_patch_name
    +        exception = dbt.exceptions.DuplicateMacroPatchNameError
    +        patch_1 = argparse.Namespace()
    +        patch_1.package_name = ""
    +        patch_1.name = ""
    +        patch_1.original_file_path = ""
    +        existing_patch_path = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(patch_1, existing_patch_path)
    +
    +    def test_raise_duplicate_macro_name(self):
    +        func = dbt.exceptions.raise_duplicate_macro_name
    +        exception = dbt.exceptions.DuplicateMacroNameError
    +        node_1 = argparse.Namespace()
    +        node_1.name = ""
    +        node_1.package_name = ""
    +        node_1.original_file_path = ""
    +        node_1.unique_id = ""
    +        node_2 = argparse.Namespace()
    +        node_2.package_name = ""
    +        node_2.unique_id = ""
    +        node_2.original_file_path = ""
    +        namespace = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(node_1, node_2, namespace)
    +
    +    def test_approximate_relation_match(self):
    +        func = dbt.exceptions.approximate_relation_match
    +        exception = dbt.exceptions.ApproximateMatchError
    +        target = ""
    +        relation = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(target, relation)
    +
    +    def test_get_relation_returned_multiple_results(self):
    +        func = dbt.exceptions.get_relation_returned_multiple_results
    +        exception = dbt.exceptions.RelationReturnedMultipleResultsError
    +        kwargs = {}
    +        matches = []
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(kwargs, matches)
    +
    +    def test_system_error(self):
    +        func = dbt.exceptions.system_error
    +        exception = dbt.exceptions.OperationError
    +        operation_name = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(operation_name)
    +
    +    def test_invalid_materialization_argument(self):
    +        func = dbt.exceptions.invalid_materialization_argument
    +        exception = dbt.exceptions.MaterializationArgError
    +        name = ""
    +        argument = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(name, argument)
    +
    +    def test_bad_package_spec(self):
    +        func = dbt.exceptions.bad_package_spec
    +        exception = dbt.exceptions.BadSpecError
    +        repo = ""
    +        spec = ""
    +        error = argparse.Namespace()
    +        error.stderr = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(repo, spec, error)
    +
    +    # def test_raise_git_cloning_error(self):
    +    #     func = dbt.exceptions.raise_git_cloning_error
    +    #     exception = dbt.exceptions.CommandResultError
    +
    +    #     error = dbt.exceptions.CommandResultError
    +    #     error.cwd = ""
    +    #     error.cmd = [""]
    +    #     error.returncode = 1
    +    #     error.stdout = ""
    +    #     error.stderr = ""
    +
    +    #     self.is_deprecated(func)
    +
    +    #     assert(hasattr(func, '__wrapped__'))
    +    #     with pytest.raises(exception):
    +    #         func(error)
    +
    +    def test_raise_git_cloning_problem(self):
    +        func = dbt.exceptions.raise_git_cloning_problem
    +        exception = dbt.exceptions.UnknownGitCloningProblemError
    +        repo = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(repo)
    +
    +    def test_macro_invalid_dispatch_arg(self):
    +        func = dbt.exceptions.macro_invalid_dispatch_arg
    +        exception = dbt.exceptions.MacroDispatchArgError
    +        macro_name = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(macro_name)
    +
    +    def test_dependency_not_found(self):
    +        func = dbt.exceptions.dependency_not_found
    +        exception = dbt.exceptions.GraphDependencyNotFoundError
    +        node = argparse.Namespace()
    +        node.unique_id = ""
    +        dependency = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(node, dependency)
    +
    +    def test_target_not_found(self):
    +        func = dbt.exceptions.target_not_found
    +        exception = dbt.exceptions.TargetNotFoundError
    +        node = argparse.Namespace()
    +        node.unique_id = ""
    +        node.original_file_path = ""
    +        node.resource_type = ""
    +        target_name = ""
    +        target_kind = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(node, target_name, target_kind)
    +
    +    def test_doc_target_not_found(self):
    +        func = dbt.exceptions.doc_target_not_found
    +        exception = dbt.exceptions.DocTargetNotFoundError
    +        model = argparse.Namespace()
    +        model.unique_id = ""
    +        target_doc_name = ""
    +        target_doc_package = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, target_doc_name, target_doc_package)
    +
    +    def test_ref_bad_context(self):
    +        func = dbt.exceptions.ref_bad_context
    +        exception = dbt.exceptions.RefBadContextError
    +        model = argparse.Namespace()
    +        model.name = ""
    +        args = []
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, args)
    +
    +    def test_metric_invalid_args(self):
    +        func = dbt.exceptions.metric_invalid_args
    +        exception = dbt.exceptions.MetricArgsError
    +        model = argparse.Namespace()
    +        model.unique_id = ""
    +        args = []
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, args)
    +
    +    def test_ref_invalid_args(self):
    +        func = dbt.exceptions.ref_invalid_args
    +        exception = dbt.exceptions.RefArgsError
    +        model = argparse.Namespace()
    +        model.unique_id = ""
    +        args = []
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, args)
    +
    +    def test_invalid_bool_error(self):
    +        func = dbt.exceptions.invalid_bool_error
    +        exception = dbt.exceptions.BooleanError
    +        return_value = ""
    +        macro_name = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(return_value, macro_name)
    +
    +    def test_invalid_type_error(self):
    +        func = dbt.exceptions.invalid_type_error
    +        exception = dbt.exceptions.MacroArgTypeError
    +        method_name = ""
    +        arg_name = ""
    +        got_value = ""
    +        expected_type = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(method_name, arg_name, got_value, expected_type)
    +
    +    def test_disallow_secret_env_var(self):
    +        func = dbt.exceptions.disallow_secret_env_var
    +        exception = dbt.exceptions.SecretEnvVarLocationError
    +        env_var_name = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(env_var_name)
    +
    +    def test_raise_parsing_error(self):
    +        func = dbt.exceptions.raise_parsing_error
    +        exception = dbt.exceptions.ParsingError
    +        msg = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(msg)
    +
    +    def test_raise_unrecognized_credentials_type(self):
    +        func = dbt.exceptions.raise_unrecognized_credentials_type
    +        exception = dbt.exceptions.UnrecognizedCredentialTypeError
    +        typename = ""
    +        supported_types = []
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(typename, supported_types)
    +
    +    def test_raise_patch_targets_not_found(self):
    +        func = dbt.exceptions.raise_patch_targets_not_found
    +        exception = dbt.exceptions.PatchTargetNotFoundError
    +        node = argparse.Namespace()
    +        node.name = ""
    +        node.original_file_path = ""
    +        patches = {"patch": node}
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(patches)
    +
    +    def test_multiple_matching_relations(self):
    +        func = dbt.exceptions.multiple_matching_relations
    +        exception = dbt.exceptions.RelationReturnedMultipleResultsError
    +        kwargs = {}
    +        matches = []
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(kwargs, matches)
    +
    +    def test_materialization_not_available(self):
    +        func = dbt.exceptions.materialization_not_available
    +        exception = dbt.exceptions.MaterializationNotAvailableError
    +        model = argparse.Namespace()
    +        model.config = argparse.Namespace()
    +        model.config.materialized = ""
    +        adapter_type = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, adapter_type)
    +
    +    def test_macro_not_found(self):
    +        func = dbt.exceptions.macro_not_found
    +        exception = dbt.exceptions.MacroNotFoundError
    +        model = argparse.Namespace()
    +        model.unique_id = ""
    +        target_macro_id = ""
    +
    +        self.is_deprecated(func)
    +
    +        assert hasattr(func, "__wrapped__")
    +        with pytest.raises(exception):
    +            func(model, target_macro_id)
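    Every test in this new module asserts hasattr(func, "__wrapped__"), i.e. that @deprecated wraps the callable via functools.wraps (which is what exposes __wrapped__) while preserving the return value and the raised exception. A minimal decorator satisfying that contract, a sketch rather than dbt.internal_deprecations itself:

        import functools

        def deprecated(reason: str, version: str, suggested_action: str):
            def decorator(func):
                @functools.wraps(func)  # sets wrapper.__wrapped__ = func
                def wrapper(*args, **kwargs):
                    # a real implementation would emit a deprecation
                    # warning/event here before delegating
                    return func(*args, **kwargs)
                return wrapper
            return decorator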
    diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py
    index 935c3421607..7eea08a9e46 100644
    --- a/tests/unit/test_events.py
    +++ b/tests/unit/test_events.py
    @@ -1,17 +1,19 @@
    -# flake8: noqa
     import re
     from typing import TypeVar
     
    -from dbt.contracts.files import FileHash
    -from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn
    -from dbt.events import AdapterLogger
    +from dbt.contracts.results import TimingInfo
    +from dbt.events import AdapterLogger, test_types, types
     from dbt.events.base_types import (
         BaseEvent,
    +    DebugLevel,
    +    DynamicLevel,
    +    ErrorLevel,
    +    InfoLevel,
         TestLevel,
    +    WarnLevel,
    +    msg_from_base_event,
     )
    -from dbt.events.functions import event_to_json, event_to_dict
    -from dbt.events.test_types import *
    -from dbt.events.types import *
    +from dbt.events.functions import msg_to_dict, msg_to_json
     
     
     # takes in a class and finds any subclasses for it
    @@ -43,14 +45,14 @@ def test_formatting(self):
             logger.debug("hello {}", "world")
     
             # enters lower in the call stack to test that it formats correctly
    -        event = AdapterEventDebug(name="dbt_tests", base_msg="hello {}", args=("world",))
    +        event = types.AdapterEventDebug(name="dbt_tests", base_msg="hello {}", args=("world",))
             assert "hello world" in event.message()
     
             # tests that it doesn't throw
             logger.debug("1 2 {}", 3)
     
             # enters lower in the call stack to test that it formats correctly
    -        event = AdapterEventDebug(name="dbt_tests", base_msg="1 2 {}", args=(3,))
    +        event = types.AdapterEventDebug(name="dbt_tests", base_msg="1 2 {}", args=(3,))
             assert "1 2 3" in event.message()
     
             # tests that it doesn't throw
    @@ -59,16 +61,16 @@ def test_formatting(self):
             # enters lower in the call stack to test that it formats correctly
             # in this case it's that we didn't attempt to replace anything since there
             # were no args passed after the initial message
    -        event = AdapterEventDebug(name="dbt_tests", base_msg="boop{x}boop", args=())
    +        event = types.AdapterEventDebug(name="dbt_tests", base_msg="boop{x}boop", args=())
             assert "boop{x}boop" in event.message()
     
             # ensure AdapterLogger and subclasses makes all base_msg members
             # of type string; when someone writes logger.debug(a) where a is
             # any non-string object
    -        event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,))
    +        event = types.AdapterEventDebug(name="dbt_tests", base_msg=[1, 2, 3], args=(3,))
             assert isinstance(event.base_msg, str)
     
    -        event = JinjaLogDebug(msg=[1,2,3])
    +        event = types.JinjaLogDebug(msg=[1, 2, 3])
             assert isinstance(event.msg, str)
     
     
    @@ -91,197 +93,98 @@ def test_event_codes(self):
                 all_codes.add(code)
     
     
    -def MockNode():
    -    return ModelNode(
    -        alias="model_one",
    -        name="model_one",
    -        database="dbt",
    -        schema="analytics",
    -        resource_type=NodeType.Model,
    -        unique_id="model.root.model_one",
    -        fqn=["root", "model_one"],
    -        package_name="root",
    -        original_file_path="model_one.sql",
    -        root_path="/usr/src/app",
    -        refs=[],
    -        sources=[],
    -        depends_on=DependsOn(),
    -        config=NodeConfig.from_dict(
    -            {
    -                "enabled": True,
    -                "materialized": "view",
    -                "persist_docs": {},
    -                "post-hook": [],
    -                "pre-hook": [],
    -                "vars": {},
    -                "quoting": {},
    -                "column_types": {},
    -                "tags": [],
    -            }
    -        ),
    -        tags=[],
    -        path="model_one.sql",
    -        raw_code="",
    -        description="",
    -        columns={},
    -        checksum=FileHash.from_contents(""),
    -    )
    -
    -
     sample_values = [
    +    # N.B. Events are instantiated here with their module prefix so that
    +    # the full list of event names is not duplicated in an import block.
         # A - pre-project loading
    -    MainReportVersion(version=""),
    -    MainReportArgs(args={}),
    -    MainTrackingUserState(user_state=""),
    -    MergedFromState(num_merged=0, sample=[]),
    -    MissingProfileTarget(profile_name="", target_name=""),
    -    InvalidVarsYAML(),
    -    DbtProjectError(),
    -    DbtProjectErrorException(exc=""),
    -    DbtProfileError(),
    -    DbtProfileErrorException(exc=""),
    -    ProfileListTitle(),
    -    ListSingleProfile(profile=""),
    -    NoDefinedProfiles(),
    -    ProfileHelpMessage(),
    -    StarterProjectPath(dir=""),
    -    ConfigFolderDirectory(dir=""),
    -    NoSampleProfileFound(adapter=""),
    -    ProfileWrittenWithSample(name="", path=""),
    -    ProfileWrittenWithTargetTemplateYAML(name="", path=""),
    -    ProfileWrittenWithProjectTemplateYAML(name="", path=""),
    -    SettingUpProfile(),
    -    InvalidProfileTemplateYAML(),
    -    ProjectNameAlreadyExists(name=""),
    -    ProjectCreated(project_name=""),
    -
    +    types.MainReportVersion(version=""),
    +    types.MainReportArgs(args={}),
    +    types.MainTrackingUserState(user_state=""),
    +    types.MergedFromState(num_merged=0, sample=[]),
    +    types.MissingProfileTarget(profile_name="", target_name=""),
    +    types.InvalidOptionYAML(option_name="vars"),
    +    types.LogDbtProjectError(),
    +    types.LogDbtProfileError(),
    +    types.StarterProjectPath(dir=""),
    +    types.ConfigFolderDirectory(dir=""),
    +    types.NoSampleProfileFound(adapter=""),
    +    types.ProfileWrittenWithSample(name="", path=""),
    +    types.ProfileWrittenWithTargetTemplateYAML(name="", path=""),
    +    types.ProfileWrittenWithProjectTemplateYAML(name="", path=""),
    +    types.SettingUpProfile(),
    +    types.InvalidProfileTemplateYAML(),
    +    types.ProjectNameAlreadyExists(name=""),
    +    types.ProjectCreated(project_name=""),
         # D - Deprecations ======================
    -    PackageRedirectDeprecation(old_name="", new_name=""),
    -    PackageInstallPathDeprecation(),
    -    ConfigSourcePathDeprecation(deprecated_path="", exp_path=""),
    -    ConfigDataPathDeprecation(deprecated_path="", exp_path=""),
    -    AdapterDeprecationWarning(old_name="", new_name=""),
    -    MetricAttributesRenamed(metric_name=""),
    -    ExposureNameDeprecation(exposure=""),
    -
    +    types.PackageRedirectDeprecation(old_name="", new_name=""),
    +    types.PackageInstallPathDeprecation(),
    +    types.ConfigSourcePathDeprecation(deprecated_path="", exp_path=""),
    +    types.ConfigDataPathDeprecation(deprecated_path="", exp_path=""),
    +    types.AdapterDeprecationWarning(old_name="", new_name=""),
    +    types.MetricAttributesRenamed(metric_name=""),
    +    types.ExposureNameDeprecation(exposure=""),
    +    types.InternalDeprecation(name="", reason="", suggested_action="", version=""),
         # E - DB Adapter ======================
    -    AdapterEventDebug(),
    -    AdapterEventInfo(),
    -    AdapterEventWarning(),
    -    AdapterEventError(),
    -    NewConnection(conn_type="", conn_name=""),
    -    ConnectionReused(conn_name=""),
    -    ConnectionLeftOpenInCleanup(conn_name=""),
    -    ConnectionClosedInCleanup(conn_name=""),
    -    RollbackFailed(conn_name=""),
    -    ConnectionClosed(conn_name=""),
    -    ConnectionLeftOpen(conn_name=""),
    -    Rollback(conn_name=""),
    -    CacheMiss(conn_name="", database="", schema=""),
    -    ListRelations(database="", schema=""),
    -    ConnectionUsed(conn_type="", conn_name=""),
    -    SQLQuery(conn_name="", sql=""),
    -    SQLQueryStatus(status="", elapsed=0.1),
    -    SQLCommit(conn_name=""),
    -    ColTypeChange(
    -        orig_type="", new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="")
    -    ),
    -    SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")),
    -    SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")),
    -    UncachedRelation(
    -        dep_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -        ref_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -    ),
    -    AddLink(
    -        dep_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -        ref_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    +    types.AdapterEventDebug(),
    +    types.AdapterEventInfo(),
    +    types.AdapterEventWarning(),
    +    types.AdapterEventError(),
    +    types.NewConnection(conn_type="", conn_name=""),
    +    types.ConnectionReused(conn_name=""),
    +    types.ConnectionLeftOpenInCleanup(conn_name=""),
    +    types.ConnectionClosedInCleanup(conn_name=""),
    +    types.RollbackFailed(conn_name=""),
    +    types.ConnectionClosed(conn_name=""),
    +    types.ConnectionLeftOpen(conn_name=""),
    +    types.Rollback(conn_name=""),
    +    types.CacheMiss(conn_name="", database="", schema=""),
    +    types.ListRelations(database="", schema=""),
    +    types.ConnectionUsed(conn_type="", conn_name=""),
    +    types.SQLQuery(conn_name="", sql=""),
    +    types.SQLQueryStatus(status="", elapsed=0.1),
    +    types.SQLCommit(conn_name=""),
    +    types.ColTypeChange(
    +        orig_type="",
    +        new_type="",
    +        table=types.ReferenceKeyMsg(database="", schema="", identifier=""),
         ),
    -    AddRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")),
    -    DropMissingRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")),
    -    DropCascade(
    -        dropped=ReferenceKeyMsg(database="", schema="", identifier=""),
    -        consequences=[ReferenceKeyMsg(database="", schema="", identifier="")],
    +    types.SchemaCreation(relation=types.ReferenceKeyMsg(database="", schema="", identifier="")),
    +    types.SchemaDrop(relation=types.ReferenceKeyMsg(database="", schema="", identifier="")),
    +    types.CacheAction(
    +        action="adding_relation",
    +        ref_key=types.ReferenceKeyMsg(database="", schema="", identifier=""),
    +        ref_key_2=types.ReferenceKeyMsg(database="", schema="", identifier=""),
         ),
    -    DropRelation(dropped=ReferenceKeyMsg()),
    -    UpdateReference(
    -        old_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -        new_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -        cached_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -    ),
    -    TemporaryRelation(key=ReferenceKeyMsg(database="", schema="", identifier="")),
    -    RenameSchema(
    -        old_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -        new_key=ReferenceKeyMsg(database="", schema="", identifier=""),
    -    ),
    -    DumpBeforeAddGraph(dump=dict()),
    -    DumpAfterAddGraph(dump=dict()),
    -    DumpBeforeRenameSchema(dump=dict()),
    -    DumpAfterRenameSchema(dump=dict()),
    -    AdapterImportError(exc=""),
    -    PluginLoadError(exc_info=""),
    -    NewConnectionOpening(connection_state=""),
    -    CodeExecution(conn_name="", code_content=""),
    -    CodeExecutionStatus(status="", elapsed=0.1),
    -    CatalogGenerationError(exc=""),
    -    WriteCatalogFailure(num_exceptions=0),
    -    CatalogWritten(path=""),
    -    CannotGenerateDocs(),
    -    BuildingCatalog(),
    -    DatabaseErrorRunningHook(hook_type=""),
    -    HooksRunning(num_hooks=0, hook_type=""),
    -    HookFinished(stat_line="", execution="", execution_time=0),
    -
    +    types.CacheDumpGraph(before_after="before", action="rename", dump=dict()),
    +    types.AdapterImportError(exc=""),
    +    types.PluginLoadError(exc_info=""),
    +    types.NewConnectionOpening(connection_state=""),
    +    types.CodeExecution(conn_name="", code_content=""),
    +    types.CodeExecutionStatus(status="", elapsed=0.1),
    +    types.CatalogGenerationError(exc=""),
    +    types.WriteCatalogFailure(num_exceptions=0),
    +    types.CatalogWritten(path=""),
    +    types.CannotGenerateDocs(),
    +    types.BuildingCatalog(),
    +    types.DatabaseErrorRunningHook(hook_type=""),
    +    types.HooksRunning(num_hooks=0, hook_type=""),
    +    types.FinishedRunningStats(stat_line="", execution="", execution_time=0),
         # I - Project parsing ======================
    -    ParseCmdStart(),
    -    ParseCmdCompiling(),
    -    ParseCmdWritingManifest(),
    -    ParseCmdDone(),
    -    ManifestDependenciesLoaded(),
    -    ManifestLoaderCreated(),
    -    ManifestLoaded(),
    -    ManifestChecked(),
    -    ManifestFlatGraphBuilt(),
    -    ParseCmdPerfInfoPath(path=""),
    -    GenericTestFileParse(path=""),
    -    MacroFileParse(path=""),
    -    PartialParsingFullReparseBecauseOfError(),
    -    PartialParsingExceptionFile(file=""),
    -    PartialParsingFile(file_id=""),
    -    PartialParsingException(exc_info={}),
    -    PartialParsingSkipParsing(),
    -    PartialParsingMacroChangeStartFullParse(),
    -    PartialParsingProjectEnvVarsChanged(),
    -    PartialParsingProfileEnvVarsChanged(),
    -    PartialParsingDeletedMetric(unique_id=""),
    -    ManifestWrongMetadataVersion(version=""),
    -    PartialParsingVersionMismatch(saved_version="", current_version=""),
    -    PartialParsingFailedBecauseConfigChange(),
    -    PartialParsingFailedBecauseProfileChange(),
    -    PartialParsingFailedBecauseNewProjectDependency(),
    -    PartialParsingFailedBecauseHashChanged(),
    -    PartialParsingNotEnabled(),
    -    ParsedFileLoadFailed(path="", exc="", exc_info=""),
    -    PartialParseSaveFileNotFound(),
    -    StaticParserCausedJinjaRendering(path=""),
    -    UsingExperimentalParser(path=""),
    -    SampleFullJinjaRendering(path=""),
    -    StaticParserFallbackJinjaRendering(path=""),
    -    StaticParsingMacroOverrideDetected(path=""),
    -    StaticParserSuccess(path=""),
    -    StaticParserFailure(path=""),
    -    ExperimentalParserSuccess(path=""),
    -    ExperimentalParserFailure(path=""),
    -    PartialParsingEnabled(deleted=0, added=0, changed=0),
    -    PartialParsingAddedFile(file_id=""),
    -    PartialParsingDeletedFile(file_id=""),
    -    PartialParsingUpdatedFile(file_id=""),
    -    PartialParsingNodeMissingInSourceFile(file_id=""),
    -    PartialParsingMissingNodes(file_id=""),
    -    PartialParsingChildMapMissingUniqueID(unique_id=""),
    -    PartialParsingUpdateSchemaFile(file_id=""),
    -    PartialParsingDeletedSource(unique_id=""),
    -    PartialParsingDeletedExposure(unique_id=""),
    -    InvalidDisabledTargetInTestNode(
    +    types.ParseCmdOut(msg="testing"),
    +    types.ParseCmdPerfInfoPath(path=""),
    +    types.GenericTestFileParse(path=""),
    +    types.MacroFileParse(path=""),
    +    types.PartialParsingErrorProcessingFile(file=""),
    +    types.PartialParsingFile(file_id=""),
    +    types.PartialParsingError(exc_info={}),
    +    types.PartialParsingSkipParsing(),
    +    types.UnableToPartialParse(reason="something went wrong"),
    +    types.StateCheckVarsHash(vars="testing", target="testing", profile="testing"),
    +    types.PartialParsingNotEnabled(),
    +    types.ParsedFileLoadFailed(path="", exc="", exc_info=""),
    +    types.PartialParsingEnabled(deleted=0, added=0, changed=0),
    +    types.PartialParsingFile(file_id=""),
    +    types.InvalidDisabledTargetInTestNode(
             resource_type_title="",
             unique_id="",
             original_file_path="",
    @@ -289,16 +192,18 @@ def MockNode():
             target_name="",
             target_package="",
         ),
    -    UnusedResourceConfigPath(unused_config_paths=[]),
    -    SeedIncreased(package_name="", name=""),
    -    SeedExceedsLimitSamePath(package_name="", name=""),
    -    SeedExceedsLimitAndPathChanged(package_name="", name=""),
    -    SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""),
    -    UnusedTables(unused_tables=[]),
    -    WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""),
    -    NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""),
    -    MacroPatchNotFound(patch_name=""),
    -    NodeNotFoundOrDisabled(
    +    types.UnusedResourceConfigPath(unused_config_paths=[]),
    +    types.SeedIncreased(package_name="", name=""),
    +    types.SeedExceedsLimitSamePath(package_name="", name=""),
    +    types.SeedExceedsLimitAndPathChanged(package_name="", name=""),
    +    types.SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""),
    +    types.UnusedTables(unused_tables=[]),
    +    types.WrongResourceSchemaFile(
    +        patch_name="", resource_type="", file_path="", plural_resource_type=""
    +    ),
    +    types.NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""),
    +    types.MacroNotFoundForPatch(patch_name=""),
    +    types.NodeNotFoundOrDisabled(
             original_file_path="",
             unique_id="",
             resource_type_title="",
    @@ -307,63 +212,58 @@ def MockNode():
             target_package="",
             disabled="",
         ),
    -    JinjaLogWarning(),
    -
    +    types.JinjaLogWarning(),
    +    types.JinjaLogInfo(msg=""),
    +    types.JinjaLogDebug(msg=""),
         # M - Deps generation ======================
    -
    -    GitSparseCheckoutSubdirectory(subdir=""),
    -    GitProgressCheckoutRevision(revision=""),
    -    GitProgressUpdatingExistingDependency(dir=""),
    -    GitProgressPullingNewDependency(dir=""),
    -    GitNothingToDo(sha=""),
    -    GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""),
    -    GitProgressCheckedOutAt(end_sha=""),
    -    RegistryProgressGETRequest(url=""),
    -    RegistryProgressGETResponse(url="", resp_code=1234),
    -    SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""),
    -    JinjaLogInfo(msg=""),
    -    JinjaLogDebug(msg=""),
    -    DepsNoPackagesFound(),
    -    DepsStartPackageInstall(package_name=""),
    -    DepsInstallInfo(version_name=""),
    -    DepsUpdateAvailable(version_latest=""),
    -    DepsUpToDate(),
    -    DepsListSubdirectory(subdirectory=""),
    -    DepsNotifyUpdatesAvailable(packages=ListOfStrings()),
    -    RetryExternalCall(attempt=0, max=0),
    -    RecordRetryException(exc=""),
    -    RegistryIndexProgressGETRequest(url=""),
    -    RegistryIndexProgressGETResponse(url="", resp_code=1234),
    -    RegistryResponseUnexpectedType(response=""),
    -    RegistryResponseMissingTopKeys(response=""),
    -    RegistryResponseMissingNestedKeys(response=""),
    -    RegistryResponseExtraNestedKeys(response=""),
    -    DepsSetDownloadDirectory(path=""),
    -
    +    types.GitSparseCheckoutSubdirectory(subdir=""),
    +    types.GitProgressCheckoutRevision(revision=""),
    +    types.GitProgressUpdatingExistingDependency(dir=""),
    +    types.GitProgressPullingNewDependency(dir=""),
    +    types.GitNothingToDo(sha=""),
    +    types.GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""),
    +    types.GitProgressCheckedOutAt(end_sha=""),
    +    types.RegistryProgressGETRequest(url=""),
    +    types.RegistryProgressGETResponse(url="", resp_code=1234),
    +    types.SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""),
    +    types.DepsNoPackagesFound(),
    +    types.DepsStartPackageInstall(package_name=""),
    +    types.DepsInstallInfo(version_name=""),
    +    types.DepsUpdateAvailable(version_latest=""),
    +    types.DepsUpToDate(),
    +    types.DepsListSubdirectory(subdirectory=""),
    +    types.DepsNotifyUpdatesAvailable(packages=types.ListOfStrings()),
    +    types.RetryExternalCall(attempt=0, max=0),
    +    types.RecordRetryException(exc=""),
    +    types.RegistryIndexProgressGETRequest(url=""),
    +    types.RegistryIndexProgressGETResponse(url="", resp_code=1234),
    +    types.RegistryResponseUnexpectedType(response=""),
    +    types.RegistryResponseMissingTopKeys(response=""),
    +    types.RegistryResponseMissingNestedKeys(response=""),
    +    types.RegistryResponseExtraNestedKeys(response=""),
    +    types.DepsSetDownloadDirectory(path=""),
         # Q - Node execution ======================
    -
    -    RunningOperationCaughtError(exc=""),
    -    CompileComplete(),
    -    FreshnessCheckComplete(),
    -    SeedHeader(header=""),
    -    SeedHeaderSeparator(len_header=0),
    -    SQLRunnerException(exc=""),
    -    LogTestResult(
    +    types.RunningOperationCaughtError(exc=""),
    +    types.CompileComplete(),
    +    types.FreshnessCheckComplete(),
    +    types.SeedHeader(header=""),
    +    types.SQLRunnerException(exc=""),
    +    types.LogTestResult(
             name="",
             index=0,
             num_models=0,
             execution_time=0,
             num_failures=0,
         ),
    -    LogStartLine(description="", index=0, total=0, node_info=NodeInfo()),
    -    LogModelResult(
    +    types.LogStartLine(description="", index=0, total=0, node_info=types.NodeInfo()),
    +    types.LogModelResult(
             description="",
             status="",
             index=0,
             total=0,
             execution_time=0,
         ),
    -    LogSnapshotResult(
    +    types.LogSnapshotResult(
             status="",
             description="",
             cfg={},
    @@ -371,7 +271,7 @@ def MockNode():
             total=0,
             execution_time=0,
         ),
    -    LogSeedResult(
    +    types.LogSeedResult(
             status="",
             index=0,
             total=0,
    @@ -379,110 +279,106 @@ def MockNode():
             schema="",
             relation="",
         ),
    -    LogFreshnessResult(
    +    types.LogFreshnessResult(
             source_name="",
             table_name="",
             index=0,
             total=0,
             execution_time=0,
         ),
    -    LogCancelLine(conn_name=""),
    -    DefaultSelector(name=""),
    -    NodeStart(node_info=NodeInfo()),
    -    NodeFinished(node_info=NodeInfo()),
    -    QueryCancelationUnsupported(type=""),
    -    ConcurrencyLine(num_threads=0, target_name=""),
    -    WritingInjectedSQLForNode(node_info=NodeInfo()),
    -    NodeCompiling(node_info=NodeInfo()),
    -    NodeExecuting(node_info=NodeInfo()),
    -    LogHookStartLine(
    +    types.LogCancelLine(conn_name=""),
    +    types.DefaultSelector(name=""),
    +    types.NodeStart(node_info=types.NodeInfo()),
    +    types.NodeFinished(node_info=types.NodeInfo()),
    +    types.QueryCancelationUnsupported(type=""),
    +    types.ConcurrencyLine(num_threads=0, target_name=""),
    +    types.WritingInjectedSQLForNode(node_info=types.NodeInfo()),
    +    types.NodeCompiling(node_info=types.NodeInfo()),
    +    types.NodeExecuting(node_info=types.NodeInfo()),
    +    types.LogHookStartLine(
             statement="",
             index=0,
             total=0,
         ),
    -    LogHookEndLine(
    +    types.LogHookEndLine(
             statement="",
             status="",
             index=0,
             total=0,
             execution_time=0,
         ),
    -    SkippingDetails(
    +    types.SkippingDetails(
             resource_type="",
             schema="",
             node_name="",
             index=0,
             total=0,
         ),
    -    NothingToDo(),
    -    RunningOperationUncaughtError(exc=""),
    -    EndRunResult(),
    -    NoNodesSelected(),
    -    DepsUnpinned(revision="", git=""),
    -    NoNodesForSelectionCriteria(spec_raw=""),
    -
    +    types.NothingToDo(),
    +    types.RunningOperationUncaughtError(exc=""),
    +    types.EndRunResult(),
    +    types.NoNodesSelected(),
    +    types.DepsUnpinned(revision="", git=""),
    +    types.NoNodesForSelectionCriteria(spec_raw=""),
         # W - Node testing ======================
    -
    -    CatchableExceptionOnRun(exc=""),
    -    InternalExceptionOnRun(build_path="", exc=""),
    -    GenericExceptionOnRun(build_path="", unique_id="", exc=""),
    -    NodeConnectionReleaseError(node_name="", exc=""),
    -    FoundStats(stat_line=""),
    -
    +    types.CatchableExceptionOnRun(exc=""),
    +    types.InternalErrorOnRun(build_path="", exc=""),
    +    types.GenericExceptionOnRun(build_path="", unique_id="", exc=""),
    +    types.NodeConnectionReleaseError(node_name="", exc=""),
    +    types.FoundStats(stat_line=""),
         # Z - misc ======================
    -
    -    MainKeyboardInterrupt(),
    -    MainEncounteredError(exc=""),
    -    MainStackTrace(stack_trace=""),
    -    SystemErrorRetrievingModTime(path=""),
    -    SystemCouldNotWrite(path="", reason="", exc=""),
    -    SystemExecutingCmd(cmd=[""]),
    -    SystemStdOutMsg(bmsg=b""),
    -    SystemStdErrMsg(bmsg=b""),
    -    SystemReportReturnCode(returncode=0),
    -    TimingInfoCollected(),
    -    LogDebugStackTrace(),
    -    CheckCleanPath(path=""),
    -    ConfirmCleanPath(path=""),
    -    ProtectedCleanPath(path=""),
    -    FinishedCleanPaths(),
    -    OpenCommand(open_cmd="", profiles_dir=""),
    -    EmptyLine(),
    -    RunResultWarning(resource_type="", node_name="", path=""),
    -    RunResultFailure(resource_type="", node_name="", path=""),
    -    StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}),
    -    RunResultError(msg=""),
    -    RunResultErrorNoMessage(status=""),
    -    SQLCompiledPath(path=""),
    -    CheckNodeTestFailure(relation_name=""),
    -    FirstRunResultError(msg=""),
    -    AfterFirstRunResultError(msg=""),
    -    EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False),
    -    LogSkipBecauseError(schema="", relation="", index=0, total=0),
    -    EnsureGitInstalled(),
    -    DepsCreatingLocalSymlink(),
    -    DepsSymlinkNotAvailable(),
    -    DisableTracking(),
    -    SendingEvent(kwargs=""),
    -    SendEventFailure(),
    -    FlushEvents(),
    -    FlushEventsFailure(),
    -    TrackingInitializeFailure(),
    -    RunResultWarningMessage(),
    -
    +    types.MainKeyboardInterrupt(),
    +    types.MainEncounteredError(exc=""),
    +    types.MainStackTrace(stack_trace=""),
    +    types.SystemErrorRetrievingModTime(path=""),
    +    types.SystemCouldNotWrite(path="", reason="", exc=""),
    +    types.SystemExecutingCmd(cmd=[""]),
    +    types.SystemStdOut(bmsg=b""),
    +    types.SystemStdErr(bmsg=b""),
    +    types.SystemReportReturnCode(returncode=0),
    +    types.TimingInfoCollected(),
    +    types.LogDebugStackTrace(),
    +    types.CheckCleanPath(path=""),
    +    types.ConfirmCleanPath(path=""),
    +    types.ProtectedCleanPath(path=""),
    +    types.FinishedCleanPaths(),
    +    types.OpenCommand(open_cmd="", profiles_dir=""),
    +    types.RunResultWarning(resource_type="", node_name="", path=""),
    +    types.RunResultFailure(resource_type="", node_name="", path=""),
    +    types.StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0, "total": 0}),
    +    types.RunResultError(msg=""),
    +    types.RunResultErrorNoMessage(status=""),
    +    types.SQLCompiledPath(path=""),
    +    types.CheckNodeTestFailure(relation_name=""),
    +    types.FirstRunResultError(msg=""),
    +    types.AfterFirstRunResultError(msg=""),
    +    types.EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False),
    +    types.LogSkipBecauseError(schema="", relation="", index=0, total=0),
    +    types.EnsureGitInstalled(),
    +    types.DepsCreatingLocalSymlink(),
    +    types.DepsSymlinkNotAvailable(),
    +    types.DisableTracking(),
    +    types.SendingEvent(kwargs=""),
    +    types.SendEventFailure(),
    +    types.FlushEvents(),
    +    types.FlushEventsFailure(),
    +    types.Formatting(),
    +    types.TrackingInitializeFailure(),
    +    types.RunResultWarningMessage(),
    +    types.DebugCmdOut(),
    +    types.DebugCmdResult(),
    +    types.ListCmdOut(),
    +    types.Note(msg="This is a note."),
         # T - tests ======================
    -    IntegrationTestInfo(),
    -    IntegrationTestDebug(),
    -    IntegrationTestWarn(),
    -    IntegrationTestError(),
    -    IntegrationTestException(),
    -    UnitTestInfo(),
    -
    +    test_types.IntegrationTestInfo(),
    +    test_types.IntegrationTestDebug(),
    +    test_types.IntegrationTestWarn(),
    +    test_types.IntegrationTestError(),
    +    test_types.IntegrationTestException(),
    +    test_types.UnitTestInfo(),
     ]
     
     
    -
    -
     class TestEventJSONSerialization:
     
         # attempts to test that every event is serializable to json.
    @@ -496,7 +392,7 @@ def test_all_serializable(self):
             diff = all_non_abstract_events.difference(set(all_event_values_list))
             assert (
                 not diff
    -        ), f"test is missing concrete values in `sample_values`. Please add the values for the aforementioned event classes"
    +        ), f"{diff}test is missing concrete values in `sample_values`. Please add the values for the aforementioned event classes"
     
             # make sure everything in the list is a value not a type
             for event in sample_values:
    @@ -504,11 +400,26 @@ def test_all_serializable(self):
     
             # if we have everything we need to test, try to serialize everything
             for event in sample_values:
    -            event_dict = event_to_dict(event)
    +            msg = msg_from_base_event(event)
    +            try:
    +                msg_to_dict(msg)
    +            except Exception as e:
    +                raise Exception(
    +                    f"{event} can not be converted to a dict. Originating exception: {e}"
    +                )
                 try:
    -                event_json = event_to_json(event)
    +                msg_to_json(msg)
                 except Exception as e:
                     raise Exception(f"{event} is not serializable to json. Originating exception: {e}")
     
     
     T = TypeVar("T")
    +
    +
    +def test_date_serialization():
    +    ti = TimingInfo("test")
    +    ti.begin()
    +    ti.end()
    +    ti_dict = ti.to_dict()
    +    assert ti_dict["started_at"].endswith("Z")
    +    assert ti_dict["completed_at"].endswith("Z")
    diff --git a/tests/unit/test_functions.py b/tests/unit/test_functions.py
    new file mode 100644
    index 00000000000..a43361a7e94
    --- /dev/null
    +++ b/tests/unit/test_functions.py
    @@ -0,0 +1,45 @@
    +from argparse import Namespace
    +import pytest
    +
    +import dbt.flags as flags
    +from dbt.events.functions import warn_or_error
    +from dbt.events.types import NoNodesForSelectionCriteria
    +from dbt.exceptions import EventCompilationError
    +
    +
    +@pytest.mark.parametrize(
    +    "warn_error_options,expect_compilation_exception",
    +    [
    +        ('{"include": "all"}', True),
    +        ('{"include": [NoNodesForSelectionCriteria]}', True),
    +        ('{"include": []}', False),
    +        ("{}", False),
    +        ('{"include": [MainTrackingUserState]}', False),
    +        ('{"include": "all", "exclude": [NoNodesForSelectionCriteria]}', False),
    +    ],
    +)
    +def test_warn_or_error_warn_error_options(warn_error_options, expect_compilation_exception):
    +    args = Namespace(warn_error_options=warn_error_options)
    +    flags.set_from_args(args, {})
    +    if expect_compilation_exception:
    +        with pytest.raises(EventCompilationError):
    +            warn_or_error(NoNodesForSelectionCriteria())
    +    else:
    +        warn_or_error(NoNodesForSelectionCriteria())
    +
    +
    +@pytest.mark.parametrize(
    +    "warn_error,expect_compilation_exception",
    +    [
    +        (True, True),
    +        (False, False),
    +    ],
    +)
    +def test_warn_or_error_warn_error(warn_error, expect_compilation_exception):
    +    args = Namespace(warn_error=warn_error)
    +    flags.set_from_args(args, {})
    +    if expect_compilation_exception:
    +        with pytest.raises(EventCompilationError):
    +            warn_or_error(NoNodesForSelectionCriteria())
    +    else:
    +        warn_or_error(NoNodesForSelectionCriteria())
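
The tests above pin down the contract: once flags are set from args, warn_or_error either fires the event as a warning or promotes it to an EventCompilationError when the event type is covered by --warn-error / --warn-error-options. A small sketch of that gate, reusing only the imports from the new test file (not part of the patch):

    # Sketch only: an event covered by warn_error_options "include" is raised.
    from argparse import Namespace

    import dbt.flags as flags
    from dbt.events.functions import warn_or_error
    from dbt.events.types import NoNodesForSelectionCriteria
    from dbt.exceptions import EventCompilationError

    flags.set_from_args(Namespace(warn_error_options='{"include": "all"}'), {})
    try:
        warn_or_error(NoNodesForSelectionCriteria())
    except EventCompilationError as exc:
        print(f"warning promoted to error: {exc}")
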
    diff --git a/tests/unit/test_helper_types.py b/tests/unit/test_helper_types.py
    new file mode 100644
    index 00000000000..f0aa077b46e
    --- /dev/null
    +++ b/tests/unit/test_helper_types.py
    @@ -0,0 +1,45 @@
    +import pytest
    +
    +from dbt.helper_types import IncludeExclude, WarnErrorOptions
    +from dbt.dataclass_schema import ValidationError
    +
    +
    +class TestIncludeExclude:
    +    def test_init_invalid(self):
    +        with pytest.raises(ValidationError):
    +            IncludeExclude(include="invalid")
    +
    +        with pytest.raises(ValidationError):
    +            IncludeExclude(include=["ItemA"], exclude=["ItemB"])
    +
    +    @pytest.mark.parametrize(
    +        "include,exclude,expected_includes",
    +        [
    +            ("all", [], True),
    +            ("*", [], True),
    +            ("*", ["ItemA"], False),
    +            (["ItemA"], [], True),
    +            (["ItemA", "ItemB"], [], True),
    +        ],
    +    )
    +    def test_includes(self, include, exclude, expected_includes):
    +        include_exclude = IncludeExclude(include=include, exclude=exclude)
    +
    +        assert include_exclude.includes("ItemA") == expected_includes
    +
    +
    +class TestWarnErrorOptions:
    +    def test_init(self):
    +        with pytest.raises(ValidationError):
    +            WarnErrorOptions(include=["InvalidError"])
    +
    +        with pytest.raises(ValidationError):
    +            WarnErrorOptions(include="*", exclude=["InvalidError"])
    +
    +        warn_error_options = WarnErrorOptions(include=["NoNodesForSelectionCriteria"])
    +        assert warn_error_options.include == ["NoNodesForSelectionCriteria"]
    +        assert warn_error_options.exclude == []
    +
    +        warn_error_options = WarnErrorOptions(include="*", exclude=["NoNodesForSelectionCriteria"])
    +        assert warn_error_options.include == "*"
    +        assert warn_error_options.exclude == ["NoNodesForSelectionCriteria"]
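
IncludeExclude is the helper behind those options: include is either a wildcard ("all" / "*") or an explicit list, and exclude is only legal alongside the wildcard. A usage sketch consistent with the parametrized cases above (the "ItemB" check is an inference from the wildcard semantics, not an assertion copied from the tests):

    # Sketch only: wildcard include with a named exclusion.
    from dbt.helper_types import IncludeExclude

    ie = IncludeExclude(include="*", exclude=["ItemA"])
    assert not ie.includes("ItemA")   # excluded by name
    assert ie.includes("ItemB")       # wildcard covers everything else
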
    diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py
    index d5b070c41e2..2b03cac453a 100644
    --- a/tests/unit/test_proto_events.py
    +++ b/tests/unit/test_proto_events.py
    @@ -1,4 +1,3 @@
    -import sys
     from dbt.events.types import (
         MainReportVersion,
         MainReportArgs,
    @@ -8,78 +7,104 @@
         LogStartLine,
         LogTestResult,
     )
    -from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars, info
    -from dbt.events import proto_types as pl
    +from dbt.events.functions import msg_to_dict, LOG_VERSION, reset_metadata_vars
    +from dbt.events import proto_types as pt
    +from dbt.events.base_types import msg_from_base_event, EventLevel
     from dbt.version import installed
     
     
    -info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra", "category"}
    +info_keys = {
    +    "name",
    +    "code",
    +    "msg",
    +    "level",
    +    "invocation_id",
    +    "pid",
    +    "thread",
    +    "ts",
    +    "extra",
    +    "category",
    +}
     
     
     def test_events():
     
         # A001 event
         event = MainReportVersion(version=str(installed), log_version=LOG_VERSION)
    -    event_dict = event_to_dict(event)
    -    event_json = event.to_json()
    -    serialized = bytes(event)
    +    msg = msg_from_base_event(event)
    +    msg_dict = msg_to_dict(msg)
    +    msg_json = msg.to_json()
    +    serialized = bytes(msg)
         assert "Running with dbt=" in str(serialized)
    -    assert set(event_dict.keys()) == {"version", "info", "log_version"}
    -    assert set(event_dict["info"].keys()) == info_keys
    -    assert event_json
    -    assert event.info.code == "A001"
    +    assert set(msg_dict.keys()) == {"info", "data"}
    +    assert set(msg_dict["data"].keys()) == {"version", "log_version"}
    +    assert set(msg_dict["info"].keys()) == info_keys
    +    assert msg_json
    +    assert msg.info.code == "A001"
     
         # Extract EventInfo from serialized message
    -    generic_event = pl.GenericMessage().parse(serialized)
    +    generic_event = pt.GenericMessage().parse(serialized)
         assert generic_event.info.code == "A001"
         # get the message class for the real message from the generic message
    -    message_class = getattr(sys.modules["dbt.events.proto_types"], generic_event.info.name)
    -    new_event = message_class().parse(serialized)
    -    assert new_event.info.code == event.info.code
    -    assert new_event.version == event.version
    +    message_class = getattr(pt, f"{generic_event.info.name}Msg")
    +    new_msg = message_class().parse(serialized)
    +    assert new_msg.info.code == msg.info.code
    +    assert new_msg.data.version == msg.data.version
     
         # A002 event
         event = MainReportArgs(args={"one": "1", "two": "2"})
    -    event_dict = event_to_dict(event)
    -    event_json = event.to_json()
    +    msg = msg_from_base_event(event)
    +    msg_dict = msg_to_dict(msg)
    +    msg_json = msg.to_json()
     
    -    assert set(event_dict.keys()) == {"info", "args"}
    -    assert set(event_dict["info"].keys()) == info_keys
    -    assert event_json
    -    assert event.info.code == "A002"
    +    assert set(msg_dict.keys()) == {"info", "data"}
    +    assert set(msg_dict["data"].keys()) == {"args"}
    +    assert set(msg_dict["info"].keys()) == info_keys
    +    assert msg_json
    +    assert msg.info.code == "A002"
     
     
     def test_exception_events():
         event = RollbackFailed(conn_name="test", exc_info="something failed")
    -    event_dict = event_to_dict(event)
    -    event_json = event.to_json()
    -    assert set(event_dict.keys()) == {"info", "conn_name", "exc_info"}
    -    assert set(event_dict["info"].keys()) == info_keys
    -    assert event_json
    -    assert event.info.code == "E009"
    +    msg = msg_from_base_event(event)
    +    msg_dict = msg_to_dict(msg)
    +    msg_json = msg.to_json()
    +    assert set(msg_dict.keys()) == {"info", "data"}
    +    assert set(msg_dict["data"].keys()) == {"conn_name", "exc_info"}
    +    assert set(msg_dict["info"].keys()) == info_keys
    +    assert msg_json
    +    assert msg.info.code == "E009"
     
         event = PluginLoadError(exc_info="something failed")
    -    event_dict = event_to_dict(event)
    -    event_json = event.to_json()
    -    assert set(event_dict.keys()) == {"info", "exc_info"}
    -    assert set(event_dict["info"].keys()) == info_keys
    -    assert event_json
    -    assert event.info.code == "E036"
    -    # This event has no "msg"/"message"
    -    assert event.info.msg is None
    +    msg = msg_from_base_event(event)
    +    msg_dict = msg_to_dict(msg)
    +    msg_json = msg.to_json()
    +    assert set(msg_dict["data"].keys()) == {"exc_info"}
    +    assert set(msg_dict["info"].keys()) == info_keys
    +    assert msg_json
    +    assert msg.info.code == "E036"
    +    assert msg.info.msg == "something failed"
     
         # Z002 event
         event = MainEncounteredError(exc="Rollback failed")
    -    event_dict = event_to_dict(event)
    -    event_json = event.to_json()
    +    msg = msg_from_base_event(event)
    +    msg_dict = msg_to_dict(msg)
    +    msg_json = msg.to_json()
     
    -    assert set(event_dict.keys()) == {"info", "exc"}
    -    assert set(event_dict["info"].keys()) == info_keys
    -    assert event_json
    -    assert event.info.code == "Z002"
    +    assert set(msg_dict["data"].keys()) == {"exc"}
    +    assert set(msg_dict["info"].keys()) == info_keys
    +    assert msg_json
    +    assert msg.info.code == "Z002"
     
     
     def test_node_info_events():
    +    meta_dict = {
    +        "string-key1": ["value1", 2],
    +        "string-key2": {"nested-dict-key": "value2"},
    +        1: "value-from-non-string-key",
    +        "string-key3": 1,
    +        "string-key4": ["string1", 1, "string2", 2],
    +    }
         node_info = {
             "node_path": "some_path",
             "node_name": "some_name",
    @@ -89,15 +114,17 @@ def test_node_info_events():
             "node_status": "started",
             "node_started_at": "some_time",
             "node_finished_at": "another_time",
    +        "meta": meta_dict,
         }
         event = LogStartLine(
             description="some description",
             index=123,
             total=111,
    -        node_info=pl.NodeInfo(**node_info),
    +        node_info=pt.NodeInfo(**node_info),
         )
         assert event
         assert event.node_info.node_path == "some_path"
    +    assert event.node_info.meta == meta_dict
     
     
     def test_extra_dict_on_event(monkeypatch):
    @@ -107,31 +134,26 @@ def test_extra_dict_on_event(monkeypatch):
         reset_metadata_vars()
     
         event = MainReportVersion(version=str(installed), log_version=LOG_VERSION)
    -    event_dict = event_to_dict(event)
    -    assert set(event_dict["info"].keys()) == info_keys
    -    assert event.info.extra == {"env_key": "env_value"}
    -    serialized = bytes(event)
    +    msg = msg_from_base_event(event)
    +    msg_dict = msg_to_dict(msg)
    +    assert set(msg_dict["info"].keys()) == info_keys
    +    assert msg.info.extra == {"env_key": "env_value"}
    +    serialized = bytes(msg)
     
         # Extract EventInfo from serialized message
    -    generic_event = pl.GenericMessage().parse(serialized)
    +    generic_event = pt.GenericMessage().parse(serialized)
         assert generic_event.info.code == "A001"
         # get the message class for the real message from the generic message
    -    message_class = getattr(sys.modules["dbt.events.proto_types"], generic_event.info.name)
    -    new_event = message_class().parse(serialized)
    -    assert new_event.info.extra == event.info.extra
    +    message_class = getattr(pt, f"{generic_event.info.name}Msg")
    +    new_msg = message_class().parse(serialized)
    +    assert new_msg.info.extra == msg.info.extra
     
         # clean up
         reset_metadata_vars()
     
     
     def test_dynamic_level_events():
    -    event = LogTestResult(
    -        name="model_name",
    -        info=info(level=LogTestResult.status_to_level("pass")),
    -        status="pass",
    -        index=1,
    -        num_models=3,
    -        num_failures=0
    -    )
    -    assert event
    -    assert event.info.level == "info"
    +    event = LogTestResult(name="model_name", status="pass", index=1, num_models=3, num_failures=0)
    +    msg = msg_from_base_event(event, level=EventLevel.INFO)
    +    assert msg
    +    assert msg.info.level == "info"
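
Worth calling out the dispatch pattern these tests now rely on: every serialized message parses first as a GenericMessage, whose info.name keys the concrete "<Name>Msg" class in dbt.events.proto_types. A sketch of that lookup, using only names appearing in the diff above (not part of the patch):

    # Sketch only: generic parse, then re-parse with the concrete Msg class.
    from dbt.events import proto_types as pt
    from dbt.events.base_types import msg_from_base_event
    from dbt.events.types import MainReportArgs

    serialized = bytes(msg_from_base_event(MainReportArgs(args={"one": "1"})))
    generic = pt.GenericMessage().parse(serialized)    # only info is meaningful here
    msg_cls = getattr(pt, f"{generic.info.name}Msg")   # e.g. MainReportArgsMsg
    msg = msg_cls().parse(serialized)                  # full message: info + data
    assert msg.info.code == generic.info.code
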
    
    From 0a74594d09f2c82221a2d189ec1a3352b332915f Mon Sep 17 00:00:00 2001
    From: Chenyu Li 
    Date: Mon, 30 Jan 2023 11:55:31 -0800
    Subject: [PATCH 30/54] move favor state arg to click (#6774)
    
    ---
     core/dbt/cli/main.py                          |   6 +++
     core/dbt/cli/params.py                        |   6 +++
     .../docs/build/doctrees/environment.pickle    | Bin 205777 -> 212413 bytes
     core/dbt/docs/build/doctrees/index.doctree    | Bin 98382 -> 102131 bytes
     core/dbt/docs/build/html/index.html           |  35 +++++++++++++++---
     core/dbt/docs/build/html/searchindex.js       |   2 +-
     .../defer_state/test_defer_state.py           |   3 +-
     7 files changed, 45 insertions(+), 7 deletions(-)
    
    diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
    index 44a911c3784..c4aa4af5a0a 100644
    --- a/core/dbt/cli/main.py
    +++ b/core/dbt/cli/main.py
    @@ -103,6 +103,7 @@ def cli(ctx, **kwargs):
     @p.defer
     @p.exclude
     @p.fail_fast
    +@p.favor_state
     @p.full_refresh
     @p.indirect_selection
     @p.profile
    @@ -170,6 +171,7 @@ def docs(ctx, **kwargs):
     @p.compile_docs
     @p.defer
     @p.exclude
    +@p.favor_state
     @p.models
     @p.profile
     @p.profiles_dir
    @@ -233,6 +235,7 @@ def docs_serve(ctx, **kwargs):
     @click.pass_context
     @p.defer
     @p.exclude
    +@p.favor_state
     @p.full_refresh
     @p.models
     @p.parse_only
    @@ -401,6 +404,7 @@ def parse(ctx, **kwargs):
     @cli.command("run")
     @click.pass_context
     @p.defer
    +@p.favor_state
     @p.exclude
     @p.fail_fast
     @p.full_refresh
    @@ -503,6 +507,7 @@ def seed(ctx, **kwargs):
     @click.pass_context
     @p.defer
     @p.exclude
    +@p.favor_state
     @p.models
     @p.profile
     @p.profiles_dir
    @@ -583,6 +588,7 @@ def freshness(ctx, **kwargs):
     @p.defer
     @p.exclude
     @p.fail_fast
    +@p.favor_state
     @p.indirect_selection
     @p.models
     @p.profile
    diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
    index 1dbc5bffd8d..915769c845d 100644
    --- a/core/dbt/cli/params.py
    +++ b/core/dbt/cli/params.py
    @@ -92,6 +92,12 @@
         help="Stop execution on first failure.",
     )
     
    +favor_state = click.option(
    +    "--favor-state/--no-favor-state",
    +    envvar="DBT_FAVOR_STATE",
    +    help="If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.",
    +)
    +
     full_refresh = click.option(
         "--full-refresh",
         "-f",
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index b4efa645e9d65c43bc0f22b1e035172c83a3a4c2..772dcf43eb8521caad6e3c528f9c036d3b2902ac 100644
GIT binary patch
literal 212413
[base85 payload omitted: compiled Sphinx build artifacts (environment.pickle, index.doctree). The binary hunks ("literal 212413", "delta 23541", "delta 7270") are machine-generated and not reviewable as text.]
zTiDbvnM&d9rscqC6dW7J6Hl)=3wN9Fm#np4lJTt*C=|s1y6|fgX`5GOKq%Kr?D4R^ zM7N#KBvM8c@w`%^vd#iO)+U^qfF$bwxl@@#8$PSdo`#t(ey4)T( zjpf85vT@2}5-hUuV|w5}R!}zXH-$77;qs3udUp^w)t-$brxDfO*TI)<43`KK`S^!v zP%pDCH_o8XZRSG=pEiRey)Ylb*^h$IyKd%u_?Z%F+q#Jh;aZEHkHVjOZp<(A&*(j) zca6C#R(I&1$#1rvP&9r-`Sg+zW#T{i#t6EFqxUS7Bnu~x4ihgW-`_^>8XZqeb*q<9 GPWnH&Blcfail_fast +

    favor_state¶

    +

    Type: boolean

    +

    If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.

    +

    full_refresh¶

    Type: boolean

    @@ -83,7 +88,7 @@

    full_refresh

    indirect_selection¶

    -

    Type: choice: [‘eager’, ‘cautious’]

    +

    Type: choice: [‘eager’, ‘cautious’, ‘buildable’]

    Select all tests that are adjacent to selected resources, even if they those resources have been explicitly selected.

    @@ -193,6 +198,11 @@

    exclude +

    favor_state¶

    +

    Type: boolean

    +

    If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.

    +

    full_refresh¶

    Type: boolean

    @@ -349,7 +359,7 @@

    project_dir

    skip_profile_setup¶

    Type: boolean

    -

    Skip interative profile setup.

    +

    Skip interactive profile setup.

    target¶

    @@ -369,7 +379,7 @@

    exclude

    indirect_selection¶

    -

    Type: choice: [‘eager’, ‘cautious’]

    +

    Type: choice: [‘eager’, ‘cautious’, ‘buildable’]

    Select all tests that are adjacent to selected resources, even if they those resources have been explicitly selected.

    @@ -440,7 +450,7 @@

    exclude

    indirect_selection¶

    -

    Type: choice: [‘eager’, ‘cautious’]

    +

    Type: choice: [‘eager’, ‘cautious’, ‘buildable’]

    Select all tests that are adjacent to selected resources, even if they those resources have been explicitly selected.

    @@ -560,6 +570,11 @@

    defer +

    favor_state¶

    +

    Type: boolean

    +

    If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.

    +

    exclude¶

    Type: unknown

    @@ -757,6 +772,11 @@

    exclude +

    favor_state¶

    +

    Type: boolean

    +

    If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.

    +

    models¶

    Type: unknown

    @@ -824,9 +844,14 @@

    fail_fast +

    favor_state¶

    +

    Type: boolean

    +

    If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.

    +

    indirect_selection¶

    -

    Type: choice: [‘eager’, ‘cautious’]

    +

    Type: choice: [‘eager’, ‘cautious’, ‘buildable’]

    Select all tests that are adjacent to selected resources, even if they those resources have been explicitly selected.

diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js
index 1fd56412ddf..dcc633eb9a9 100644
--- a/core/dbt/docs/build/html/searchindex.js
+++ b/core/dbt/docs/build/html/searchindex.js
@@ -1 +1 @@
-Search.setIndex({...})
+Search.setIndex({...})
[single-line minified Sphinx search index omitted: a regenerated build artifact, truncated mid-line in extraction. The new index picks up the favor_state entries and the ‘buildable’ choice.]
0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "model": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "project_nam": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "macro": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "favor_state": [[0, "build|favor_state"], [0, "compile|favor_state"], [0, "run|favor_state"], [0, "snapshot|favor_state"], [0, "test|favor_state"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "resource_types": [[0, "build|resource_types"], [0, "list|resource_types"], [0, "list|resource_types"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, 
"compile|state"], [0, "list|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "models": [[0, "compile|models"], [0, "list|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "project_name": [[0, "init|project_name"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"], [0, "dbt-section"]], "output": [[0, "list|output"], [0, "list|output"]], "output_keys": [[0, "list|output_keys"], [0, "list|output_keys"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "macro": [[0, "run-operation|macro"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py index 134cae1c626..7b88ba69e8b 100644 --- a/tests/functional/defer_state/test_defer_state.py +++ b/tests/functional/defer_state/test_defer_state.py @@ -6,6 +6,7 @@ import pytest from dbt.tests.util import run_dbt, write_file, rm_file +from dbt.cli.main import dbtUsageException from dbt.exceptions import DbtRuntimeError @@ -98,7 +99,7 @@ def run_and_save_state(self): class TestDeferStateUnsupportedCommands(BaseDeferState): def test_unsupported_commands(self, project): # make sure these commands don"t work with --defer - with pytest.raises(SystemExit): + with pytest.raises(dbtUsageException): run_dbt(["seed", "--defer"]) def test_no_state(self, project): From acc88d47a31dd97a184cd372ac6f4ff234a85f92 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Mon, 30 Jan 2023 18:38:36 -0500 Subject: [PATCH 31/54] mutually 
exclusive handling for warn_error_options and warn_error params in Click CLI (#6771) warn_error_options, warn_error mutual exclusivity with click --- .../Under the Hood-20230130-180917.yaml | 6 ++ core/dbt/cli/flags.py | 28 +++++- core/dbt/cli/params.py | 2 +- core/dbt/events/functions.py | 6 +- core/dbt/flags.py | 12 ++- core/dbt/helper_types.py | 16 ---- test/unit/test_flags.py | 13 +-- tests/unit/test_cli_flags.py | 88 +++++++++++++++++-- tests/unit/test_dbt_runner.py | 4 + 9 files changed, 136 insertions(+), 39 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230130-180917.yaml diff --git a/.changes/unreleased/Under the Hood-20230130-180917.yaml b/.changes/unreleased/Under the Hood-20230130-180917.yaml new file mode 100644 index 00000000000..64c35d67f12 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230130-180917.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: warn_error/warn_error_options mutual exclusivity in click +time: 2023-01-30T18:09:17.240662-05:00 +custom: + Author: michelleark + Issue: "6579" diff --git a/core/dbt/cli/flags.py b/core/dbt/cli/flags.py index a8af62bf61d..dcfb59507c5 100644 --- a/core/dbt/cli/flags.py +++ b/core/dbt/cli/flags.py @@ -5,9 +5,9 @@ from importlib import import_module from multiprocessing import get_context from pprint import pformat as pf -from typing import Set +from typing import Set, List -from click import Context, get_current_context +from click import Context, get_current_context, BadOptionUsage from click.core import ParameterSource from dbt.config.profile import read_user_config @@ -59,12 +59,15 @@ def assign_params(ctx, params_assigned_from_default): # Overwrite default assignments with user config if available if user_config: + param_assigned_from_default_copy = params_assigned_from_default.copy() for param_assigned_from_default in params_assigned_from_default: user_config_param_value = getattr(user_config, param_assigned_from_default, None) if user_config_param_value is not None: object.__setattr__( self, param_assigned_from_default.upper(), user_config_param_value ) + param_assigned_from_default_copy.remove(param_assigned_from_default) + params_assigned_from_default = param_assigned_from_default_copy # Hard coded flags object.__setattr__(self, "WHICH", invoked_subcommand_name or ctx.info_name) @@ -78,6 +81,10 @@ def assign_params(ctx, params_assigned_from_default): if os.getenv("DO_NOT_TRACK", "").lower() in ("1", "t", "true", "y", "yes") else True, ) + # Check mutual exclusivity once all flags are set + self._assert_mutually_exclusive( + params_assigned_from_default, ["WARN_ERROR", "WARN_ERROR_OPTIONS"] + ) # Support lower cased access for legacy code params = set( @@ -88,3 +95,20 @@ def assign_params(ctx, params_assigned_from_default): def __str__(self) -> str: return str(pf(self.__dict__)) + + def _assert_mutually_exclusive( + self, params_assigned_from_default: Set[str], group: List[str] + ) -> None: + """ + Ensure no elements from group are simultaneously provided by a user, as inferred from params_assigned_from_default. + Raises click.UsageError if any two elements from group are simultaneously provided by a user. 
+ """ + set_flag = None + for flag in group: + flag_set_by_user = flag.lower() not in params_assigned_from_default + if flag_set_by_user and set_flag: + raise BadOptionUsage( + flag.lower(), f"{flag.lower()}: not allowed with argument {set_flag.lower()}" + ) + elif flag_set_by_user: + set_flag = flag diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 915769c845d..ad4c3d28870 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -398,7 +398,7 @@ envvar="DBT_WARN_ERROR", help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.", default=None, - flag_value=True, + is_flag=True, ) warn_error_options = click.option( diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 00407b538bd..e06364b390f 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -168,11 +168,7 @@ def msg_to_dict(msg: EventMsg) -> dict: def warn_or_error(event, node=None): - # TODO: resolve this circular import when flags.WARN_ERROR_OPTIONS is WarnErrorOptions type via click CLI. - from dbt.helper_types import WarnErrorOptions - - warn_error_options = WarnErrorOptions.from_yaml_string(flags.WARN_ERROR_OPTIONS) - if flags.WARN_ERROR or warn_error_options.includes(type(event).__name__): + if flags.WARN_ERROR or flags.WARN_ERROR_OPTIONS.includes(type(event).__name__): # TODO: resolve this circular import when at top from dbt.exceptions import EventCompilationError diff --git a/core/dbt/flags.py b/core/dbt/flags.py index e5b94c7415b..f3ddbeb49df 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -9,6 +9,8 @@ from pathlib import Path from typing import Optional +from dbt.helper_types import WarnErrorOptions + # PROFILES_DIR must be set before the other flags # It also gets set in main.py and in set_from_args because the rpc server # doesn't go through exactly the same main arg processing. 
@@ -46,7 +48,7 @@ USE_EXPERIMENTAL_PARSER = None VERSION_CHECK = None WARN_ERROR = None -WARN_ERROR_OPTIONS = None +WARN_ERROR_OPTIONS = WarnErrorOptions(include=[]) WHICH = None WRITE_JSON = None @@ -170,7 +172,13 @@ def set_from_args(args, user_config): USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config) VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config) WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config) - WARN_ERROR_OPTIONS = get_flag_value("WARN_ERROR_OPTIONS", args, user_config) + + warn_error_options_str = get_flag_value("WARN_ERROR_OPTIONS", args, user_config) + from dbt.cli.option_types import WarnErrorOptionsType + + # Converting to WarnErrorOptions for consistency with dbt/cli/flags.py + WARN_ERROR_OPTIONS = WarnErrorOptionsType().convert(warn_error_options_str, None, None) + WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config) _check_mutually_exclusive(["WARN_ERROR", "WARN_ERROR_OPTIONS"], args, user_config) diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py index 84f253b00c6..77e25c68ce8 100644 --- a/core/dbt/helper_types.py +++ b/core/dbt/helper_types.py @@ -123,22 +123,6 @@ def _validate_items(self, items: List[str]): class WarnErrorOptions(IncludeExclude): - # TODO: this method can be removed once the click CLI is in use - @classmethod - def from_yaml_string(cls, warn_error_options_str: Optional[str]): - - # TODO: resolve circular import - from dbt.config.utils import parse_cli_yaml_string - - warn_error_options_str = ( - str(warn_error_options_str) if warn_error_options_str is not None else "{}" - ) - warn_error_options = parse_cli_yaml_string(warn_error_options_str, "warn-error-options") - return cls( - include=warn_error_options.get("include", []), - exclude=warn_error_options.get("exclude", []), - ) - def _validate_items(self, items: List[str]): valid_exception_names = set( [name for name, cls in dbt_event_types.__dict__.items() if isinstance(cls, type)] diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py index 6f03ec22e92..36648e7b5c3 100644 --- a/test/unit/test_flags.py +++ b/test/unit/test_flags.py @@ -6,6 +6,7 @@ from dbt import flags from dbt.contracts.project import UserConfig from dbt.graph.selector_spec import IndirectSelection +from dbt.helper_types import WarnErrorOptions class TestFlags(TestCase): @@ -66,13 +67,13 @@ def test__flags(self): # warn_error_options self.user_config.warn_error_options = '{"include": "all"}' flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}') + self.assertEqual(flags.WARN_ERROR_OPTIONS, WarnErrorOptions(include="all")) os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}' flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": []}') + self.assertEqual(flags.WARN_ERROR_OPTIONS, WarnErrorOptions(include=[])) setattr(self.args, 'warn_error_options', '{"include": "all"}') flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}') + self.assertEqual(flags.WARN_ERROR_OPTIONS, WarnErrorOptions(include="all")) # cleanup os.environ.pop('DBT_WARN_ERROR_OPTIONS') delattr(self.args, 'warn_error_options') @@ -283,7 +284,7 @@ def test__flags(self): def test__flags_are_mutually_exclusive(self): # options from user config self.user_config.warn_error = False - self.user_config.warn_error_options = '{"include":"all}' + self.user_config.warn_error_options = '{"include":"all"}' with 
pytest.raises(ValueError): flags.set_from_args(self.args, self.user_config) #cleanup @@ -292,7 +293,7 @@ def test__flags_are_mutually_exclusive(self): # options from args setattr(self.args, 'warn_error', False) - setattr(self.args, 'warn_error_options', '{"include":"all}') + setattr(self.args, 'warn_error_options', '{"include":"all"}') with pytest.raises(ValueError): flags.set_from_args(self.args, self.user_config) # cleanup @@ -310,7 +311,7 @@ def test__flags_are_mutually_exclusive(self): # options from user config + args self.user_config.warn_error = False - setattr(self.args, 'warn_error_options', '{"include":"all}') + setattr(self.args, 'warn_error_options', '{"include":"all"}') with pytest.raises(ValueError): flags.set_from_args(self.args, self.user_config) # cleanup diff --git a/tests/unit/test_cli_flags.py b/tests/unit/test_cli_flags.py index 462f801e2ae..e9fea1a06d9 100644 --- a/tests/unit/test_cli_flags.py +++ b/tests/unit/test_cli_flags.py @@ -7,6 +7,7 @@ from dbt.cli.main import cli from dbt.contracts.project import UserConfig from dbt.cli.flags import Flags +from dbt.helper_types import WarnErrorOptions class TestFlags: @@ -18,6 +19,10 @@ def make_dbt_context(self, context_name: str, args: List[str]) -> click.Context: def run_context(self) -> click.Context: return self.make_dbt_context("run", ["run"]) + @pytest.fixture + def user_config(self) -> UserConfig: + return UserConfig() + def test_which(self, run_context): flags = Flags(run_context) assert flags.WHICH == "run" @@ -53,9 +58,7 @@ def test_anonymous_usage_state( flags = Flags(run_context) assert flags.ANONYMOUS_USAGE_STATS == expected_anonymous_usage_stats - def test_empty_user_config_uses_default(self, run_context): - user_config = UserConfig() - + def test_empty_user_config_uses_default(self, run_context, user_config): flags = Flags(run_context, user_config) assert flags.USE_COLORS == run_context.params["use_colors"] @@ -63,8 +66,8 @@ def test_none_user_config_uses_default(self, run_context): flags = Flags(run_context, None) assert flags.USE_COLORS == run_context.params["use_colors"] - def test_prefer_user_config_to_default(self, run_context): - user_config = UserConfig(use_colors=False) + def test_prefer_user_config_to_default(self, run_context, user_config): + user_config.use_colors = False # ensure default value is not the same as user config assert run_context.params["use_colors"] is not user_config.use_colors @@ -78,10 +81,81 @@ def test_prefer_param_value_to_user_config(self): flags = Flags(context, user_config) assert flags.USE_COLORS - def test_prefer_env_to_user_config(self, monkeypatch): - user_config = UserConfig(use_colors=False) + def test_prefer_env_to_user_config(self, monkeypatch, user_config): + user_config.use_colors = False monkeypatch.setenv("DBT_USE_COLORS", "True") context = self.make_dbt_context("run", ["run"]) flags = Flags(context, user_config) assert flags.USE_COLORS + + def test_mutually_exclusive_options_passed_separately(self): + """Assert options that are mutually exclusive can be passed separately without error""" + warn_error_context = self.make_dbt_context("run", ["--warn-error", "run"]) + + flags = Flags(warn_error_context) + assert flags.WARN_ERROR + + warn_error_options_context = self.make_dbt_context( + "run", ["--warn-error-options", '{"include": "all"}', "run"] + ) + flags = Flags(warn_error_options_context) + assert flags.WARN_ERROR_OPTIONS == WarnErrorOptions(include="all") + + def test_mutually_exclusive_options_from_cli(self): + context = self.make_dbt_context( + "run", 
["--warn-error", "--warn-error-options", '{"include": "all"}', "run"] + ) + + with pytest.raises(click.BadOptionUsage): + Flags(context) + + @pytest.mark.parametrize("warn_error", [True, False]) + def test_mutually_exclusive_options_from_user_config(self, warn_error, user_config): + user_config.warn_error = warn_error + context = self.make_dbt_context( + "run", ["--warn-error-options", '{"include": "all"}', "run"] + ) + + with pytest.raises(click.BadOptionUsage): + Flags(context, user_config) + + @pytest.mark.parametrize("warn_error", ["True", "False"]) + def test_mutually_exclusive_options_from_envvar(self, warn_error, monkeypatch): + monkeypatch.setenv("DBT_WARN_ERROR", warn_error) + monkeypatch.setenv("DBT_WARN_ERROR_OPTIONS", '{"include":"all"}') + context = self.make_dbt_context("run", ["run"]) + + with pytest.raises(click.BadOptionUsage): + Flags(context) + + @pytest.mark.parametrize("warn_error", [True, False]) + def test_mutually_exclusive_options_from_cli_and_user_config(self, warn_error, user_config): + user_config.warn_error = warn_error + context = self.make_dbt_context( + "run", ["--warn-error-options", '{"include": "all"}', "run"] + ) + + with pytest.raises(click.BadOptionUsage): + Flags(context, user_config) + + @pytest.mark.parametrize("warn_error", ["True", "False"]) + def test_mutually_exclusive_options_from_cli_and_envvar(self, warn_error, monkeypatch): + monkeypatch.setenv("DBT_WARN_ERROR", warn_error) + context = self.make_dbt_context( + "run", ["--warn-error-options", '{"include": "all"}', "run"] + ) + + with pytest.raises(click.BadOptionUsage): + Flags(context) + + @pytest.mark.parametrize("warn_error", ["True", "False"]) + def test_mutually_exclusive_options_from_user_config_and_envvar( + self, user_config, warn_error, monkeypatch + ): + user_config.warn_error = warn_error + monkeypatch.setenv("DBT_WARN_ERROR_OPTIONS", '{"include": "all"}') + context = self.make_dbt_context("run", ["run"]) + + with pytest.raises(click.BadOptionUsage): + Flags(context, user_config) diff --git a/tests/unit/test_dbt_runner.py b/tests/unit/test_dbt_runner.py index 2e4bb5e71a3..6c17de6dc8c 100644 --- a/tests/unit/test_dbt_runner.py +++ b/tests/unit/test_dbt_runner.py @@ -16,6 +16,10 @@ def test_command_invalid_option(self, dbt: dbtRunner) -> None: with pytest.raises(dbtUsageException): dbt.invoke(["deps", "--invalid-option"]) + def test_command_mutually_exclusive_option(self, dbt: dbtRunner) -> None: + with pytest.raises(dbtUsageException): + dbt.invoke(["--warn-error", "--warn-error-options", '{"include": "all"}', "deps"]) + def test_invalid_command(self, dbt: dbtRunner) -> None: with pytest.raises(dbtUsageException): dbt.invoke(["invalid-command"]) From 726c4d6c582a9b5d305ce5e0e4528f22ba4bb221 Mon Sep 17 00:00:00 2001 From: Ian Knox <81931810+iknox-fa@users.noreply.github.com> Date: Mon, 30 Jan 2023 19:30:01 -0600 Subject: [PATCH 32/54] Enable the new Click Cli (#6785) --- .changes/unreleased/Under the Hood-20230130-175752.yaml | 6 ++++++ core/setup.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Under the Hood-20230130-175752.yaml diff --git a/.changes/unreleased/Under the Hood-20230130-175752.yaml b/.changes/unreleased/Under the Hood-20230130-175752.yaml new file mode 100644 index 00000000000..95a4938915a --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230130-175752.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: "Enables the new Click Cli on the commandline! 
\U0001F680" +time: 2023-01-30T17:57:52.65626-06:00 +custom: + Author: iknox-fa + Issue: "6784" diff --git a/core/setup.py b/core/setup.py index b5c43cc184a..f3b9c2017e4 100644 --- a/core/setup.py +++ b/core/setup.py @@ -43,7 +43,7 @@ include_package_data=True, test_suite="test", entry_points={ - "console_scripts": ["dbt = dbt.main:main"], + "console_scripts": ["dbt = dbt.cli.main:cli_runner"], }, install_requires=[ "Jinja2==3.1.2", From 84bf5b4620a7041d821cc220ca4d23f0ad219ab4 Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Tue, 31 Jan 2023 10:18:10 -0800 Subject: [PATCH 33/54] [CT-1947] Alias `--models` to `--select` for all commands except `dbt ls` (#6787) Co-authored-by: Github Build Bot --- core/dbt/cli/main.py | 9 +---- core/dbt/cli/params.py | 34 +++++++++--------- .../docs/build/doctrees/environment.pickle | Bin 212413 -> 207366 bytes core/dbt/docs/build/doctrees/index.doctree | Bin 102131 -> 99920 bytes core/dbt/docs/build/html/index.html | 25 ------------- core/dbt/docs/build/html/searchindex.js | 2 +- tests/unit/test_cli.py | 12 +------ 7 files changed, 19 insertions(+), 63 deletions(-) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index c4aa4af5a0a..8d257b55189 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -172,7 +172,6 @@ def docs(ctx, **kwargs): @p.defer @p.exclude @p.favor_state -@p.models @p.profile @p.profiles_dir @p.project_dir @@ -237,7 +236,6 @@ def docs_serve(ctx, **kwargs): @p.exclude @p.favor_state @p.full_refresh -@p.models @p.parse_only @p.profile @p.profiles_dir @@ -347,7 +345,7 @@ def init(ctx, **kwargs): @p.profiles_dir @p.project_dir @p.resource_type -@p.select +@p.raw_select @p.selector @p.state @p.target @@ -408,7 +406,6 @@ def parse(ctx, **kwargs): @p.exclude @p.fail_fast @p.full_refresh -@p.models @p.profile @p.profiles_dir @p.project_dir @@ -471,7 +468,6 @@ def run_operation(ctx, **kwargs): @click.pass_context @p.exclude @p.full_refresh -@p.models @p.profile @p.profiles_dir @p.project_dir @@ -508,7 +504,6 @@ def seed(ctx, **kwargs): @p.defer @p.exclude @p.favor_state -@p.models @p.profile @p.profiles_dir @p.project_dir @@ -547,7 +542,6 @@ def source(ctx, **kwargs): @source.command("freshness") @click.pass_context @p.exclude -@p.models @p.output_path # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate? @p.profile @p.profiles_dir @@ -590,7 +584,6 @@ def freshness(ctx, **kwargs): @p.fail_fast @p.favor_state @p.indirect_selection -@p.models @p.profile @p.profiles_dir @p.project_dir diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index ad4c3d28870..2dbb5ad511e 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -142,16 +142,6 @@ hidden=True, ) -models = click.option( - "--models", - "--model", - "-m", - envvar=None, - help="Specify the nodes to include.", - cls=MultiOption, - type=tuple, -) - output = click.option( "--output", envvar=None, @@ -274,14 +264,22 @@ default=(), ) -select = click.option( - "-s", - "--select", - envvar=None, - help="Specify the nodes to include.", - cls=MultiOption, - type=tuple, -) +model_decls = ("-m", "--models", "--model") +select_decls = ("-s", "--select") +select_attrs = { + "envvar": None, + "help": "Specify the nodes to include.", + "cls": MultiOption, + "type": tuple, +} + +# `--select` and `--models` are analogous for most commands except `dbt list` for legacy reasons. +# Most CLI arguments should use the combined `select` option that aliases `--models` to `--select`. 
+# However, if you need to split out these separators (like `dbt ls`), use the `models` and `raw_select` options instead. +# See https://github.com/dbt-labs/dbt-core/pull/6774#issuecomment-1408476095 for more info. +models = click.option(*model_decls, **select_attrs) +raw_select = click.option(*select_decls, **select_attrs) +select = click.option(*select_decls, *model_decls, **select_attrs) selector = click.option( "--selector", envvar=None, help="The selector name to use, as defined in selectors.yml"
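The comment block above is the heart of this change; the standalone sketch below is illustrative rather than dbt's actual MultiOption implementation, and shows the plain-Click behavior the patch relies on: several declarations bound to one parameter, so -m/--model/--models become aliases of --select.

import click

# One option, many declarations: Click derives the parameter name from the
# first long option ("--select"), and every other declaration is an alias.
select = click.option(
    "-s", "--select", "--models", "--model",
    multiple=True,  # stand-in for dbt's custom MultiOption/tuple handling
    help="Specify the nodes to include.",
)

@click.command()
@select
def run(select):
    click.echo(f"selected: {select}")

if __name__ == "__main__":
    run()

# `run -s a --models b` collects ("a", "b") into the single `select` parameter.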
zgkF~Mh`3^9r!KlcT5a^H;EEEIrdMr2nQ_g^>fZ`s{?}93SGQWQFRv>tn0&*^-2Ujn z4&1b|^M6FJj<;mg?%YzMw)A%qwG8&rAH!MB-2g^z%cy1DRzbD=HR$cLGbRgTVN;g77_{92SH?BOFTd-F7cWj;=UEyGAH z=25Ipw~#o%qVidOdbxg>QUV#eZcB&G}chQ1jQa zh2D9Caik=a{qjb%j=M{qr4%W}|FQg&HCWI8zJcw8bT9bTLMjp1LWmWOIYI9-#2;Ex zk^(=LA#rew5?Lh}YrzueG2AUptnk2@guy3eh{)6h&Q7E{5h)t7oUz!P7fz5}j(CG- zSp@Mc4YSK4h>si4!UgZ=c^b-api>wI565vh8>Y0zv~VJH1+!}Kc%!huw&Didj|)V4^)-LNPm2^|$gDLQId*RG@8^{|*-M@i8L5;{5_t<(r6)W`dUj#@Xs zBcY=+4X~WhQHO?DuIcZj0XQ`x!=;d3H^Op4NV8%TAvra+;kGx%i$X{*nkc4)kS4_{ zLV6Rc7SfhDyO8F`Dh2u0lZ9xL5vzIN0!}De~kJmLvKXFKd$t}oO*w>2qfM-jk z;pVnf8m`6LO2fU|3cak_hE#`ht!xd~yS2UH*1e0Do23wE=++i{T4Ea|KnL5{%#C=@ z7NDE&mlOuRH887nL z5u9p|M=d|ZEYL4S3H9|9b*Mk@U=Q_~j#&0q7j&@ihrFhnn!v>m5h(&Wy%S#K^CTG6 z1&^G1kgx?^(Z8%7B(7j)S1v6cUs%u`ukPqY!l8B#TYFXNO=8G3INe^h*C-g=Q)#M) zP_ZZJ2CaHwD?Rjv5$DKm;tu=!U<={b=97powCPK{NC5QdOQsgM^+7uYk1vz{1kR?S zp}_H^4BSXVmwWd`XZTnIzxO4RS-qraSkwN^UWTv=%tet3*8=rs2f&vh<9}8GeRllIT9P0 zPujsJ1B+oDgJE?BvT)uGbw;5@kt8pT!iyq-jU9tWYcnwlE@P11mHSjI z#s?HOl}r{>cW6G%F3e+}Vz~>mQ4HOtV>vF&^V1P0ggIpfUKGMSJ`<1ro`>19^Jg~c zb(~M$!vQil4;MID=Y7EAS5h0|7^wkSv!n)Tf*+sS!AG-Eg9(06+?{yAlw9n}PiJHO z{>eotXU$P}ndrIb*;@<90+hr&H0idMRDcc(Nfq#)k9K+PCyRthx=5tju*_CfMWGA>I--X;=CT?6dt zd*C`-js3n>y5r5S$r!l14b%6C^*FlY`UC!6uS9R-1|`N}o3K!u0u}+^m$k zv)LBoEn6@s%}x*}_GBSu@vRs@-X-^LMXRDq4*doTh%TA24UdAh5+`sg#0eAGj#$wz zXKc5**mwsP5&iPaPCP2yhk0i}0hWs@!sfYOfaQe17w^KWAMYm~B860T zzip6tfk(W;A&_$bNxUDwI)HwOe!T7=UKIWK&qH|B;w0vkv4<5=Jv}U7%EWN>5xcNH zJ&I*-oW?p-{vONnzP$K*TUFmZh8IO&K6MYE)jSY5m5aShOk)P9_U0HLi`!!_7~DTJBzoz2;@@@IYePX$hnL@;B^kz za}S+3d6oFnupql)`~E8B&=|bWk>O&(QR_Tbz3MFt`kwPVLYNeqTu_?k{sjzVQ3R<& zV8=zAd1c+k-qZRL=>T)v;B4%NOG>%y%Qnq+yh_HyI!}umoVrIm;NDg21a8R@1p|H~ zF9@VOKnY}ASHxLRe4WcgXmZ_6-T;p=na1Bzv{rEN7Is;wG56cVict-MRkzVco^5K~ zku`~d^LOx8k!{|)hes=(G7vcKOXn-qR`KIew6=iBH~&t3LwI@;`2~~Z zyeIOMn0dt7zmj0;m$!s^kv0(g)TWnu&y@_9BoJLmZ&^MmM95{R?VDJnfWL(XkSuw- zm~ceDRBoT7@_@;kCWVL@DxZareEX@?C0?+p3=Je(B}T~ZmsbjArwVga-m_C+6!raL zKgUOzq6q3FW=iCD3FFId==g>-bJd=*YzLRs+Y&Uq84H#~Bq z5qVo_2j=m1ZKzAAvf{i+sGKOTrxcT@C-s3?3zfRvYoT(zIlMHLE1bkK)Cw!B&`?~t zP(`&JoRm_|&QzKBTq;Y)h$K7I1yMZ7l5%KJBw1%yM2I9i(G8EXD`3Tj_@RS7?o>&# zx$ZXa-Yt(sgrGet;88#|8VFxhz;Ysa`gtgpfAqkMB6*&vh(}(*)E`nk(e#~ead6iY zX(AOvC9G@f9@)^K3YPjgl!n2PDp-o^zpa$A0l~QvG^lWON`VCd z4QYyn9ttZM(U|U{y}t}ARVsZs|LAAIk3RfkG5-qqF?yEEu+*VxqsCd&GIFzWYo_zk zGepT5qSTDh=|9p^rT8Vm*o9vt;{rc|u!8Av^faMW!{PLBT92J-=n4xGs1HA}C4t_f z@!~?~DuY^#PCv(Qi4Dh(qx>i)9D-A5<4O(fXQJ_Fbl>#a;VgKR53Lst6%(j8ZKV8- z6My|-&s#K(b`#t#kkpjMxRo$FOQu^1v!i0AuH8y|X{DaDqNfjOMgwV%Qo6R9oz#x8J4}RDNGE*tlrI#{O2c0kzcNqUi?7 zt*N(bNgI{|(uy`wHQ{R$;QShv0BH9PDllBoyTJZ;sGo`+D(J2y0tpf)DwXKt|BejG z5f`e1sV{A#z?XeM%RFlqt zW4^2b(Wg*`vy-XX>F0X*6O(X{Z)s)fR}-3liExsl(TES^x1mi`Dy$+-IIhAvS;V8P z%%gAB4lS?YpL?y?vcvcW3IQvam{TA>X8h`ho3Dl2vQsBS!q5&$WJLuubj>AQ29H&I2 zV;`zcjbrrO49B@8sd1^|^r2KumRY2SFK`?#QsX{_8`n=Gk8kzd-Hvmm(_Gj$B4qa3 z&|lMxzv{ly4c4z3K= z1bw#Nz-Q(Ruz{a@I?I7*yR=d;Or!9vdIOsrH^93k?lFo6jj(EhepZh??KsxnE6G3h zz*JK;=wEvFGsoGyQ{q;|so|(^Y|C;Gn4uZdd?9$aM&#b&ghN6Ue)DMsX(kPv`k0oh znvpgnt>36z!}=;wZz;~CCDk0GUu`(O0;e%AM{DL7ee@=Jnlm8=|7o@5K$JBQXBWf9 zQg>BLOx7F7HfI3%_)ON>qotl8eWhowbewG;p2-iYrfxf4)6~cG297vxfDh5+5oh;< zeV=Fs=lgo>UB|IvaQ2LpSv@q1R#vU(DnyFat!m-rCeJvoO))$-ij-qNn?M6q`1%HX zU92J#sbWLv~b!^OGOi~g(Fh|I;oyW>1sQ6-wc4TfHrFeF417$rqhIA0?ge|*M` zo1v=YTUDNMTRJ2CL{5>+025UY>DSjcoR_^6>Zz z>O>k#3(3nggK9&Ag=mu&pzU&;+I5rkQ`TjNxf9yFAU(tCcw5l=1UH8dUtSJMXRdlMFx7I2|CXK^pft;nNMYp}06|fV`%hxQJEaHU2N(8ZBiiFhVv=0nlOUtX47ZG}P zxJfo&A}OoicGCNMg_?bbwt5R~%vsefV2u#!Unw9%-J$i^av|JnI$-r#hpb5-; zJ$sJhY@QIwF6pwzCe4ayhaUTl<5+t(B$p%`JC8GDZ`Q1h&g%`}Lv&P+<6;p>@eMaH 
zUZs;)dhAPcSbH|4!O?OE%Kny?fuY}Mf?f?$*JzaPCr&u-JkN;qZMB7S+cXxM>n*(H zxCMJwqywY*OkkucKK=FFR1;kORBC24pA`*LAwSh4r#OzZXF|FWUFFY4f_72j7hQHfAdk!CKJ zX~2&&ffuvHFBQjmR#Z8@+(2Gxf-Lf4@ImbPhn1+mIfRev;h^Sm{Q-lGedcUL(>o=2 z_4df`2J#IPWP4X7t}w3D z>1Q;v&X#(3GZS$8s`I0Ua;Ug)R^#~qJ-naeaPhmJSqmF?wM-%QKD( zQ#^vIr{GWD(d>fOG2m;N#ecx?{kCg&sT1v>*l2c7^IRd>VB-T5HZV8=P(BYQL&<-l zjWLRiEq`hr8D$u3SWVav>1>fg4tb#2X zv0nax@{B|Bw{MWpcN@!WIrf<*TedRbTbRI$7>OZvuu_vho@;v7AOk+l1YW3ew&M7~ zmm0@EGmxj7AeX3etKxY5e>9G7GT_&nzzbELMKYgttb3(-IC9!RK52q192W!aUyWrV z_Ia&I#Lo=)VzYSF2Bt6Ue}oILNpCd6h<6jcCM(M`4r!9-GyX$TWsfvbns9fvIQwJ_ z8a~$81>y|m8k;c3BPMoC`$$7Qh($f(_B6n|nS{$d)5(&4m|D~e;cNpv%LLs%o|CU6 zy)-ili*clEb502M-1SDCcr$kk^2&i&8ptkxN89aVFE0~F9!1p zu`+RHmbIvVH{lkm7p99mg8t)ex@OdWi zq7}q*>+h6S2&-sO|7d8h0l&)xUZm#hf*kLX_j82!4_-<^j;AGF)yP*3HZGa4A*vuA zF_i}5YUv6+eNadKl`ZPU5W%0`X`k-DF>6AVsB1_=IaMs`NBq?dHiG3Dhx>!bobO3E z_6GH@tKKod-!=)CnRBQ;M(QV%=?3~h6LgW==5lag@##jU0Y=gIPMkOrQhhsQb&7Xk0XU#sdH&wpVuPPNO| zThHy`I9E(sr^d_KwnsHf8DHzj;R8Mzl4hLPOwyZ~=(w4ZG`Us@{N8Yj`lYtjdia-) z!$p!jQ4;eq(72{W{T9ymdiY_-;r8rEAI9@3Y;X7~!lHfx|EC^)$0S^p7c0UwUrTpz zuWeDkzEK{Et7=5)Do!{gO76bcA!?W)1wSUzqJEzxTF;I$$CfWBbyncF>sZw9vvkzM z+na=ocaz2^@VF0E34W}eJIZmceU?hUK&O>nS_Xcqr%}vey#;XGg4lQ}N+{`4(6qiq z{ccjB9=^wMxLAnaRKlI#K;!f^J@<;^Tp@GuoWco;tK%fVqmf1ZE};eg*BFfkONkS1 z>X%M|#8?`;)xy_u<=Yg1@AmRDC1rBud$6i9V-026D`!eL(S|aSl0$2S!;Hq3K(e-A zU1LioNi2o7vvSj);XB}2xua)U`dO{wrSRT~=+jv7<~JU(+=v`;n!m)Il-?gtC8eh& zLf=?RgW!SU+aU3ITwMPMk7uN(<7dy@^dX6`Gu9GIhb2OBtR;y5trTaeS#20zM$0qB zpFyqP#~0HrBQr(`Ggzi;oF$Z(oe+m_)GtefrE!)>x;zn%$64xET!}ZOPvG9I;zz3! z!9Cs*HZdQ+)J)f2oaOex@cyHRqy@DZ)<5mz+)=qXxw;bR8}YVJbEoC@{WN!$Q$||q z$bN%z)3@QtK`qk%SEOqPQ79-mbzqu`=vLy<_CzR*xA@RKiEu975*59SyU9NWwV0Nh izMg;EVEe``DdzCn9{vq)i%GC_q(>49W+mWD0sjLEpOISt delta 23058 zcmb81d3;UB`^U}PNRWs%64@77C?OGBlvw&z`x0AX526~xO(c|BODPS)L>{#Vr7da+ zqP7Z(Qae>^sikU)e0hoH=ve&zxsI^UO1I&NcOn+pgDcP7{mI zbK2-MA)t8iX7Km){4W9;4}rQqq#AoUV=}ANK8mgV(hRj;l1k9Tg*0PdWSQB6`6VIV zk9acQneptvjA*uC=Gfdmq&{mq%glVUhO+qCX7)0x3cEf#9s)y1bv7h>ESwlh>axmn z%#d1z3l_7)%)VI|%=Rsbhlk~81dCfL%@r**Lq9iiik)6& zW|t?HWb2kxVgvHxp{zTa`zuf3z;ZLoK9SA>Rtgufwt1dx^U8R*(iWQ@{k3FW`Py7i zt3B}|tjcQM_E5HEbv!Jaj83ZY%~O(oXz`*0fW#+t^5&Hbq?3~fY8vHjoIqSf6v0!}=^-k!Za zI{(W}!GvAkFrPKwY-V>i2D4L}m0s*sq>?BZ&0C`747n8>k6e?>g`qB za(A!AEzS3v*_=XO_S=5x7Lv)D{*pk_*_^#SSeXODCp5h@luICQSmA+0yb^S%63gEo zO7lufhqNTC_&vf6Qp1-IDGmAZXXRbrxYVwPRXauJ7Z?)C;eFUvk}ZZs3V!il8&`!%Nms; zQ&`feE@U2_C(tSGEUK4>B8sqc@&U1M&ifv$+*u!*?=GLphYdelgKXmrDBGI^L(M0| zn}z=xNe)=b+Tw7m%NCyRAdH+rBf)a$=3PjnXE?GFYxP?&t9emTLBU0($;p=#zmO|t zR_>xd%ekWRi}&wVzrGuZLR=KQx0OArQ6%&^0oRAp73c7}RB zc(ClLg8KfMI#!vVk7T8vda^gq<6(y%2ARn(#xmb$bMx!Gj3(^rb3ZoyRXpqPA|~JE z?_dJI*C6ri$Jb_98%%;(yML6PIs1>|l*w?W9CBBGz5wiA&Z1<&vZN>@iE%_xr5{%0bw1s{5c$l|v?N}&PA$-&6k5Fc+!q#v8vaMFL=VX~gni4YBS@MD1x|{U~VYh%I9$K4zqafP#9k2vp5Wl5H0tI7ZC`t%;N>)suIg!hsEz<6|SS2 z)se)faoAB^>8jy1EINvcM5NHsfk><-bkwY-RY$vP;boztHc@z4=;%n4<*~;_<5ioE zGHWaSDRkr%qv&XSjHUh6>tF@0qtv>1qtMacbrCCcG_s!3xQP0AQRt{t13VHsf(BSk z=%`Grg|;geX;0m}xT#diUdI`#d8n;bwm{yR$Fx zfsq5SRsXz3e>@#PCbR4*QSi+`Y<=IeWFP@^GS=%lfCRytWUR;M9+)r)>tR|BwLij( zDEb5-gYoEUGVz1#!B}nZD^jb#>tk+h=ij6cd@uwrrwzvb{&fiYYV5SS&}FEi9_L|b z?EMsik;N?)t8E^FL;kB&r0^vG1Pw=@@>Cj*50}=I$TB32n4LXpW;P&N(InDGM3b(w6jkE6Etg8@_oD`T{<2^c<2);PrfB&Rv0n<3mO~0KUSyLM(m@EIvH4 z0NWtM(r6JL(Ok6m+ak42x-3@u-+75uC%5qshJ1yiV*FBM3Y|P(DjmLZnMEeq%dytF zMdUrGvVuh9F2)S#*2p zJ8Z5P*MF=bBca_Y?0xfE(wU8$RtH|LRl0B0I`o1ZxXspMwb|d|@ISjAy&wi|&kYt_ z$&C`H4BSsQ5p!|Z5y{B|<;X3aZ|RKb`IgRTv>9&^ol$%X9>uT4aWQv`x--geRXSt; zR%>VMEWo;()}xHtY_oL6zHLgqQ9oFEBY8VkTfdJu!>Lv{QXXx`x?<#}?65T1YbRE* 
zjoco)@HBG^KHsff%E;}%+cI*W|A-f>Z^Pg=c@L5nZ$k-|EL5CSP>2`*{DF*zxP5qZ zb~}#Xll!omnCLs~R~q+vKVB3QebWPYba)rG>-YhzCIWHZ0H3#>k=Xb+d%HtqF1h4*3! zPbNAY#)R~G0E(}p2mti11l$I43YQ;z!fAL$f!q!d3u1V92JgQu?@vF9;`I!)BqPU; zk|yY#a#v(dn*!&~DZZZm8$PR;-DaQ1qmm~{AOu}N?}%CJ+Y6S??S2uj3VA=dgh%hr zlTxtwG-`atWz1*aye74w>=lK%`wF^JPKC9uk}=Te9NIo`Rmpn`nq4E$iP(sO4%g8V zk0}Mmuk$2UsCV>DX&@NfZmHB;F!h!#(HVIg>kPPz;l}wdM2Hab*crOfu1e}iK=iz(6j$wwNj-^(4u&Ki)k?p>sE7MO6bsVslDaf0SSW+5l6)-h9mMbD zvczms*dG*ZRptCwid3+St4s7APs8ZEU=bb{OU43WpC@Bvjf5-^5Sf*cAFO2@^4?<- zK!4^95&KY|Jtpt-$cc{NlaCkQqG(AjSqoC&$~zLrvKx8NYeGZ#w&T3}R7h`y+q`08 z6yo2P_s_j1&1a?MDF4t_YLFD{2Sh*{LL zJl-JI$3K zLRgDl6CooEFV^;?p-=)hGug5mUFn=qks-|rlypM4?JhbD%J_VW~vCP_e_FtKosD`OTqF zGn_`!9DW>7Fg~2do9Lmif>YJ#HrjVp7!Iiw{LcUr{^`g6Ea9gM@lR$}$<(A_$r+=~ z$s=;c7v&3%#@?FSTTM{Ft*`|WSr+m3&RTjszFZ@>K6`0J!vg=BE}V7 z)TCa#pj<6_hsKEu-8}|3%S<`PZ)p^ce-84aSiBTX>s6?4EvbV?nf+60hO?jyFB%gL z8*5Wf+CcfYvG~^)e4=SA?IXAeus)jBb+a*hN~W8QnV?vyZMD)>T8WWXwDg8_k^*iK zLw!`>@e0NU{?yV~*~)*#`9eS)lxUv9oL7f>sF+LamMN7ct zy0o+kyjy}*Uf7|SvUy>f#S3;$j(Un?|J^cLk=_W0le=gSm>ox*p=EtqMrGrgosBCF z*uakUg~koAcU>a5a(ci(X`+K!Qv?V8Q#;Tg7dy=YD9MD;mA)9ZnCavnbA5O3OVO(_oc_VR{QG z#x00`4T!^jWf9hX^-#gH_3)X-;bZ{1Wvk*A(p2M?d_8xAaV~dDE?`%lR~(YrOw*OW z>e;6qW5bf_IFT${i!T31v2edR^;dc2mEOX0;}(jNX)PL1l`A{}`u&eqQgMBuu$xBV zl|_Leg*RCWS^~V=XuSW8-on?$Ellc;y?RW6 zW3QHgnQf`N%0Wl;@I%JoA`UF^HC3iXiiKzGG>U(qhu<|0#}E)jD^|0PerHJSK+C9F zUJjb})QHku6c`evX!*nvrt-92HL06Z9W}a%(Zi!0ga^F9e5pq@*f?h6qm08vWL#EFP8i``HIZ?#p1Z&}*Af}YK6uoVCc?sQ8h`B3 zqjwocbAgjv66>0v88_GU*sI2|R)LU`Fy|v|az-Mps1lzuL?vs)XA%X*#m5sP+))(l zOFcCCL{&XI%sAVc5aBCC!S?GZIb=zAX=*Ou;BxfhmNp!&KA9; zO~x&yrJ<$Re$&= zkJ0A=EUrO!seb*a4bbL z`ybWA4;zPzXA!gi6Va#rp~+BMLDjL3^zi$};bQi$R0BE8hUOo3m<6N#f5lC(;e(pX}lS|yx zv;Q#87Q@9gQi|i?C|XIihfo5GaR*4XPf=W!IHqDRy6D*StP_y|YwLVcYg zWyZODjAl!yy&b-d19+QIGVm_GKCCcS;PCOICWD!+GlqyzJ;!jMX6;us>V~79S0X+IT zkd--C7Os4TOWF{{MBr3f3R+I4I@Tg~d{#b*lq!Izo|{;?kZ&^TO3gzwzq%dk1~ae*}~PooeIIGLxB zUwKhrNPeQ>*8NHRZbTS3HcR92`g#L(j2l3YE9;0plKuT0>H=-DHMvz!y@3SB46v-H zrJ?;C>P#kCji@)C#_Rc?8s`fS?OBCux0RCZ$22o^o}RtLINLh-$wA4UnWO31{d#tx zaW)_OIfmedkf za@T2=%b3n+`M5EK1%~*18HArJVB7n_=5d;ZY25LH9=p{zR&E$94Wbe%ygzf@B>TaMU9=}$U9U;Ldh4-0jAMlutwEAb z#@273Lsc2$L_K_*ak$7BSE(JM3zIAK@MXr~qUB-*^Rr?>7bg$s;rook#VAT>Ox#`M zYrlKk@k#LI7R{@nzw{PvIcR~udd3gqI{l1M;L%p+ej6P77lqH6wD8^~D1oz)Y zeO2I^cHrs`fUV=StAzjdgC+>X+rc|H02dSLClcIphbC-g+JVy@01K_~Jw&ol(zor@ zG<&`seVzk!6iXD@Ey37}C44dlrza;3{Fkl%9d>rMIbg@Ccydh|V?WZ0s-C@KN5AMO zU2N_+)sdmGc%jBg_&KI-YwE3|c$dBS4mun;d_QB{Uh1!EeHA-=kSsAIc;OrI6;Nvh z|8SpXKjT9?{0ENW#rHGA1inY^G3o^Uf6|ypu`}_J117Ag8O;_Z65z-I&4$lRJNyg> z@WMgj^P+rQf$~q7Vh?HX8|?7w9KefjfPSqbcQn@iOubY+d)khC!U3`^=X(VmPt(5M z|Med2xt*CO4w$j#P1Lup)Tj3mjeja*v36&+k}NT#PqCX6S6BKc>lclG-nSz+aDdD^ zR_s*w$D8o8F;u2{9_ecb@8tkoq%@z`g-)kwAMX*#Lz4$&*uTL1%+ACF2TX`}VBg>k zJSLnwp^5rm+mV+$Ko*j>f(Ay3` zMeD_TuX6P)9qR;sXEkdE)uiu z3jD!2%||a?^zcp&!o@5#MuGP_uSwuX>ERg;!uwpnPe~WolT+Z)3mW(WJscc_AHPV; z(>?X%>#FSQ)E(MfqHZe3@6ub?Zrp+`gk4vBv+lCy<=|C4{IYSl2w=|jJ-3B%u1(>?>sz~0 z{nhJGJ^W+ia9bWQSGGL&re2C`BauId; z&o5ri>P?+8ZptR|*H-tqz+ZoA7G^K?2A&x=AeK(})7dw;ChPo{CIj|qpicq3WQjvc zCrvrL7aX~xnW!7v!5caN7hAIfWvjXGq47QHu9}OI?C5eT{R*=1gKBm7T2im{-mK-K&4l@6M-eQHaNN^=>HG|Q;N zc29KB0TVptkVn>zP;VRFw*%jC04#!z$OkIK$~3b56U~dmve2odN&WkP(xSkSHbpK_ zH&#S9;U+cBvaIK<7uT&Hm)vF(fA8x$YP!0z4 zminu_2t6Es|Ay8K!9}XmUOYwu9Dl1xq1x!VEsb+|2Z)tgnq<%GWK#dCB2|wbVjL~j zX*tsKaDbT9zZ;sP=guzS%&Pd3gL zj^X=OWUXY^EUnSVw|e#}2ifA&^5c@7=dS6{U-ays9c0_yhCY@Ce9M~DpW{5#8@OlO zfGy&BG!pZ6kXuMb@`(SAx8<>#b_1)7EHNZd(Z?d<)|cjhl{2YdEY`8ZM>~KQF}J5e zcB)`fzgbMMBX@OxY>m3(amZ9OsXxy9)DHZK17Hzz^Y8|~adq3nq)zpg*pU}HKo(JV 
ze=>c7UvlyMN-X_?4)?(9M5*7=FXf;5A1Lil3g9!td?*kCdXlHJ$6y zSg%SaSz<_)BFfjm8+acq_tAK&njJaZ0kZIvNVhvS=DO(%UVbL^2a@gV@U0!d^J#>R zz#DkOC;4kelG%Rp7qxcW@E8r2!sX9p%^PljyPw~W}_9_RIrZbzfytlbq;!gdn_;HvG(HY0gqxzvbwgCjc;4pj{3sy~x`mrU zle^$aM2hxemRq~w12cyv2euNZjGRe1x*93{@V03=Q*-)H%gJ&ck(`u1U~o>#AS@i* zEX5uPSHn#n?`e&se569Snn%~bWrEW5VQ~-`VTz3Egf;LvICN@GN*{jK*K)>fDF*S{ WFn)%&O^7gcp`+sp&PJG`$o~NVXeb^4 diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree index 91acfb7e6938c8a26a779c940f0368dc81f3d76a..a31c50cfc63f3cfa72c2ac1e2b4edffba1173c53 100644 GIT binary patch delta 5889 zcmY*dd0bW17IvMkSAh^N$UI#@1O$W&*8%mH;t>=jxi|!lfB}jV$fTknAc!|0J2ApHkV$+ad?3HB1>gWFWX;M7y_$mx_ zLuAaUTZ%RG*M6W?o7}vdtnA5|Xgjnw{^%731F$S53?m#`nPqz*kCWi{~68FmbF4Kk@RhGB#by z#?`6HnpSu)U5C$8jjXgAgkVBi%Zx|UvYy8+)y>IocZU zObUc|$sz)KPfkMP#Lg5g4UEFNsWQGdDUzO|r+fy*<~e9=Gc^VlGhxN#1DpYu zr^x7%>xSK?8hPZuBM$@CPIa+BtjSTMej!K3ZF$}pkt-rzkt-seohPGvp%G1eWXSA9wfEhXcO<+z>7r@|$ zGiCIc?SsF}RN!cj<7Wp*C#)&j;eMN#bdoOEFhdOZ z;+WYsShBDK{7mjjylwHu!f3doj)E|+JPiNy5kIl?mkpll(HFEBv(%sr;unR2GbSx` zMbp%Gz?)txd948tgryuA2<4_8Z!6fK)1pZF6uTq{3m0)h;ORvcRIu-2l?o0^WW2Y` z3rhIRwK@+j0Y5GgIBZ@<9IWs}?Q$b-u879W>n5V%5r_=vX0R1o~rMg+L4b zG;a=dD@D>KujIH%*0!uM2@I%5d$g@|!J#OK^l#+INfEZ3Kc-bWkzuD+$#{b~VTVeE zA<@!MTMIkZ5Aer3l>)1DThu-F#NJziq&z-@L~mkh#!97p1nDcQf}|C;_$W9MJMWLi zZ$F`M?k~fhpHh6>U*j42yYHYs22~lEDS!f1S;Y?;j;X7K!fjY17W2UZ~Bl{*C0&vS#aXP`Ysr-Q?R9YCua*b?^N?_%`O?| zop-`#yEsiC8XxTp!`!$KR9JFgx6qLOn0_c0Qs|oj`eUD(RLD}ErT+E^Re@5Gy@el@ z`#3*fiSiwL*XR_MK$SYu;?{$m@xL`HSH|yGxnejVxRQUcg)2WFthqyDrLrG5(!!cJd~}Sn1_mS4hB4C|XpM$%7AV5MvX&~pN}OVD zrx0gftp$Dzty78fvQ7{u`KTby{-YwzR~)0-M~rDcCKz*HSws;REIY1N`nuzMwYg#|zggxiY=o6ebSa;$OXUp;LTi6nKQf15Rlj2lQPYSjyIK|n5H%^IGAo>T( zeu@6HhEgx3xejdbbhwPuPRnSw(;0SQ4!1rBPDv= zdM{T>a8!~O!f@v$7d+eGAdPm!^xEN=eL?K1@q%c}<~Iz)?=LxEV}sD(?iVe`ls5A@ z@Ww?^mZUvmyyKD%-@D?CS1ySl^}H;CUzzx7;d|!HbaAZeTk z>IkgAU5*FurDONIGLF5k$Ev#`A{*|9NnI3^aD08gKTf|VQdq`ww`0-cek4#M@?fhp z&WXoC#)AJUi0so=Sm>P4}7c!1~<$-xGMB<%)p$bs!e}w>T{adco(+;g)s@a$Q zQf%dymm>RIUdhZ_0#_`2CDQNItCnT@zUH0ySgXoj3tI6XOy4mHFT9S%hKF{nrv~gR zyMij(AF>S^FjUsTM@m!9P5{WPwHBP2pF}1~PtFP@*ee=w_T(-4T-Fvm*dPskRvK`& zUIUze%(*oMm0ECilpfeVYj9y6THqXH#ab|-et{o*s0ES9(1TUjfPv+<1syB5h4ySs zTQa6LoIPm^Z%~lEZ$;B7jX2AWzY^H*m8U5537`C15~gi z8v>AYn8n#bB#r0jAY9tzie>Yo*exA+e%y|>O}7CyqaDaKdmLaXuqr#6-=qgSO?uw= zeAZ+K9eA_$ro9a@8-nTVgbqs!2HxAS^}*yrcVngvA)izOxUsAdiq$p) zc(LOl|6p*pP%@bp356^_jGlT$ffM^FjJ83SXcpMHWxudpS;-)+_26}r8N$gtvkRr; z;&3w$ehY^KJp7&p`p7rivm52m3VifnoIZi*sX zx<4~#Bb`UdUcWA6b+;$DvRPeJR@Ham69%)mu4Yy}rYCmz9Xd!tH?mUFc5Amc4-LE6 zxh{e*EU`Q7k6)#0|M61Gv@sAY)q5(*oB|EX#|Wifu*x1PpKf_7d}9CVNld)u$-YU2 zEt2HL9?ha8_hgG@2qu<5FNji#1N*j@>gM8QA@R(2$d$&*b)u`6Qf_F(ejG^w8y5=> z5GUMI2Zpe1y`eMj%|LPQCyf^7gS={zGGQV}6ZsehG0y=I0mZ_R{+q*~F9D?F=FGl? 
zVou#MmLgLQTpJ12n7_$^?U4y1YU`Lyzc)fNx1X8DPO*e$*;H5BAMA91#H?Oj3Lue}mi6^+Tx?vPC`kxF78p@9uO!}QZ z47%;s%qRhtHjFIyj-d$E4&#z#%^r<`C>W~Pgkk-{4y7GO)}tT$@IY>%;~W%4DXfj-M-z{0<6xgf+cUl<%l{#FEEBAp9hJsn=jt zBURr^V$r+8l1AGdGT@nc#Jn z2_=|jJUB}|^ybSAb4*ntvcHTiPNj&@#fD|45ndOwsTdzkBUbEq?#JBH)v#5hb4D&@ z{^KY;KbZm!(ndWmP(RZP^Z%Au()(sE-lHehF_#zHTAe<#lpw$&+#cY~2sepv~MYc*(%DkjJRaQ$UsAw zn=*+UdKD1|c1===Yc;uL8?aDuHb~Qbxl+Q%mf^@5#Q!Z*$Y*e;;H&LFD;bHPd4;>T&A$n9@4;E-sGoQvsmy9MCQW)#y*{T6_q^t)o{STkz@JSM&L zugNKgTW<|~?$@f-#HaLY!u|<+CpIaghxDs(iPhBfDVYT`b2IZ-eXuHMqGDG*VShO{ ZDKDJhtGwiXZ%^1eA&i`A)+{C2`9Hrc3~T@Z delta 6036 zcmY*dc|es_(|1lFf~Z{3%OaQUih>Ao>6%e$=;H=1fD7)52%mr(h$xD_u4%X;qm8Dy z~*1V6)ioZG1l7!o1_F`^|}Zgm}20+60P{@ws?G;Vl-(yX3~Vbobg#R zCrrxB8ef2^sjuLkhHwTR6FkPpNNNtMq}#% z2DR-9%dm8yfnG`O*sZ@e-W%w|(}$z6U)tXCok4ae8*E^DvO8WL=!+SHo%pY1QCKlJ z5z{Y5!XaEa&>=o>D^FhD0Px5V1C7DK;<6jwiHxI!TnNQ!Llet}w1vA;5QB$VYVC)5 zcr9JNsgDnbgbS|*atr=Z6ON;YYvaBhZeZ-N+2E9dqFDpHF)RvS9_b{Hz7UM1Bb5bM zf0RK#^o5hSdz67;BmG5@)--8kD9pi?BOSOY-B&q#HQgwipM!B32L3rJ09R!=G3xs$ zyp^F94I5*WH~gUq;J8c!3p0Z8WTq28ykf_ItV9gB+6tFu8MtB0DeN*<$EqUJ8{Zu3 z#BZFovTUitjdl)zFf7V8$`f`M0KSo9;HzW(L{E1NEgB6e2-yzN#~m#xUGZ#g3?}%v z$Ysp*+n6*i4*KA!9EX_c&Vlfyjh^6v)$tw}mD>vD^3GUXz9<5>=SJe0@dt3(xE1I( z(ZEsT198ekoz!y^b=-PP(n*Euye(Ruf>-h^IBcT7xa*FcJB-2!`8u=MyuiTfydW$t z&}sdqK*yr{6az;te~5SG-vXN$>Y=hDW14}L1rGeCP{-73s*367(+q4^=qHwYkYxZQ zP}vSZLt;^L=sELVrAL zX)X}}muroIi#T_dO+f)}O0D8z!*a*&d4M+-X%8>rq=n(4f#-kwb!1^UG(t;>OmrhE z8U#)8#YN%J+$JJTIs}_E8_9@m6}sB z$_)IWv_Hdh^SWp(E%U&fl`)u&I#LHvpC7eKMe5pW15?o#<6nOosWjf`5d9lrXuD46 zMltk_6` zu-7^#ZdwzMo7N@bH2=Bix8A^)1D@bE{`+XZ9bMjRhjZ42;E*@f_NWa{3AJKFA|Ct5 zf|iX=^f(xbc^eaP%E1!+dE;sP^rQM^8!cG5$%($7L}A3{1DNn}A}qlCjk>}%y=B2g zhXV2BTRIP|x9B*m-(u!LY&CHHVL!~+s`GGUtIh-bUaLc#YK#p&YlC0EqZ9J{yE-A; z-_;3e|DJ&hjs@b{_ndex%mFW=Wrv1;$hHz`4nEjsVDIC>*l)X2lkfKJ%FEFApL%(N zpV=GQGY%|bO%qkul6A{I&<4AFs8|=hb{P2J3xCYnp$Qkx)*V;9!&j0&I(HhHdav#@ zF#MDiU)rTXuw<9gC9Ga}EhZM*?zTwcEiYFmlL7x(BQ zi7$`cJs&W2Z=C#cpt6O?_WCa@hzW9ETLp&g`;?!B6&ef``xO|(Hn}5C;bHOtjfa~D zG#+Mtr15b6BaMfsgHnUIZ(96uU7x}d4y=U5jgK`fx_qKxvHz1gSZq0@Ve#mYf(0fV zHWV1v9oAr|IjrIVrI`OI_oE{g*>soVxuabaB%Gf)DY+ZO;HuB|sE&)Bj~VFunGH7` z(@=2zJdx6SLTj~u$LE@)+>dKKj61IIfQOGCR4Dl3ghoM&lNtrk504yo;Et2NvS99= ze2Rs=Up&PETE29u_M7>o2EzF-H4v_UrGfDBsoKtaO5?Kr*Jk>6f34H+d0M3(ZmaWF zoK~C+HN28M;R@b(XI>G$T3H7J5#S^HMGE*R1O)p?1c+E4i5d~?qggZj+c%+2jbtEg!z2M#oTIfxynUx5(3cGDrC z8SeZ!29N%#E6@8E$spHms`EsMv`*zmTKN=HBxX53D}lqp_YDl{`vm{}r-8Tb1&XO!3(wIw>Mz9(m47`&e)|VTLyyUY z6KeQ}ku=H`fB(}P;jzX^+GCYVao;L|T2^DxMVnq@;EV@> zc%Vk3#`|xb^4I>>s7d-q`tORj9$9hgKPE#z_($26`=rh`$i>k$4zFIm_39O7WUc;w zc|G-mYimMQ|1S^2OxDx};DDNqH%M8UBfx^o{_&%C1n45A`p>~$Mb7kv3mB`9LNkhR z<=t|GyUCkW+G&?7=pua}^@v}5Wrfm{`e4vX0ahB~##?wurfqIKzE*b#qre8B0ZeDy zLHR@8t}Gep!IC^J(^3ym<1)E4CJ~dq=t>5Ed)?#03SE0 zF$7Yn52VOp&Q8xYg)S8dO<);Nb~BEUmj!~TvKjl$S1O9NvRQnmqPML4Ob3V|9y8E#(U#)1J9}9B}C5u0YQ(m&*Z6z$Hb*l0Q3vF)(Eh&V}koPUh4>03@ zBY=a+qdBb&;QMA3WuojR}P)9v4ir8!E2e-gdldXimLq~mhIkb zXU>sxYfI`J1PTr8;*nsm*6|8QfIm5eJwT_Ueh9?zf=2|_0qH$ZQ*Y*&sg|pTNdom8hpi9%@xJTQr;4)OPP*2_-kAn@E7$0CCNWe zbJ{>4G@v_iIzO~Cj;ocCPt|cOIw%?nsf&}Jn#6E*Z+7z8@@7mAoTi1@?f9d-8&lB> z{Pbch*Xg$}updkU)S|r^lh@mW>{!%0o@E;F5%lkPmXTHO_L3Qs-O>n$*c@11^L-G| z=9gLYRVRBaIxzM^?gy&&b)n8wI7hqU7)pF?rn4Owhts7_C6O};%jkTP>6 zJ-OaxH>6{U(1+X&PDw-(OYxnAtIE+292Gl~xQg)JdJFy3i_c?n;X;#ND6cmxQq)3I z`{;uL>3koSk(?6Q7xef-?cKE{N#T!@KZc~3nLI(8S(B&;Qvb1`;#8@p3mfd6 z#HL(Ktp#vwKMmkb{hk5v&i*WYD3ztp_Tt+^KyV$3(+8MnIx~Rf`M);wS{ln9=)Yq2l*>0ot8Z#x*weWsa2C48lY`3-{-zN=kPbyq(1 z@BZKoH_1zmrg!z$D!H{2)T7R0+1{#P4atKeG}xF|52it+%wW1nh~zd1o@YvB`Do1R 
z?j>*Z@ti|*odjLl1i!;q56J=+aQ2Q>6g?MVBgt4ru%LvBrrm-52@Wa!y9ogpD`PsG5{2+(5 z2Bb3{JabtqU#DqwE^FocG+oSPt$d-TapPF)?-}gjv2iRU|6kdiHcvLGDrLN;`c>nV zrJAABCP?oJpuPh4?g_k2c23ucCU&MxyMRF%o0ygZ4eB)mK4v6O6?2lC98 zNd@^VHGeEa>|8!eNu0NyY)WmK%m>Rp*}H&+s&kmD-Y%#Gx9b!g$7xfZvF>Cc3rg$M z)tDltvPwzNv!|NoRZV4S3GFA-SaR(I2FlQ)TGHKKq#LwY8YW4LlBaVISKRyU43_?U z5=S$tn5Dh)7(ctiB;nEFVkn`SJeWoivsh?fKC^epEcQAfiP|2va=35E z=}mOE*VT&Xt2g)J0bSRM7;axZ7aU-hr6O|81zpYL-93xITgeh_ddO)ob68OD*mHocqN(pOznz>&tm4jZfCZ!(&kwZCVrF8 zJ{1>c!CfIjTU2~C2W~u<^w?gnUhV__mAyZEZ|*(0s^(vXOWh}qo{%+VdR|t+s#&Wd ha;02J_WlwjQc{>5DqpI7p4oe|qj^)s!v!!1{vRI4Clmkx diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html index 17e7ed2445b..b7f90d68077 100644 --- a/core/dbt/docs/build/html/index.html +++ b/core/dbt/docs/build/html/index.html @@ -208,11 +208,6 @@

full_refresh
-models¶
-Type: unknown
-Specify the nodes to include.
parse_only¶
Type: boolean
@@ -590,11 +585,6 @@
full_refresh
-models¶
-Type: unknown
-Specify the nodes to include.
profile¶
Type: string
@@ -696,11 +686,6 @@
full_refresh
-models¶
-Type: unknown
-Specify the nodes to include.
profile¶
Type: string
@@ -777,11 +762,6 @@
favor_state
-models¶
-Type: unknown
-Specify the nodes to include.
profile¶
Type: string
@@ -854,11 +834,6 @@
indirect_selection¶
Type: choice: [‘eager’, ‘cautious’, ‘buildable’]
Select all tests that are adjacent to selected resources, even if they themselves have not been explicitly selected.
-models¶
-Type: unknown
-Specify the nodes to include.
profile¶
Type: string

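Taken together with the mutual-exclusivity guard from patch 31 above, the runtime behavior can be sketched as follows; this mirrors the test_command_mutually_exclusive_option case in tests/unit/test_dbt_runner.py rather than adding anything new.

from dbt.cli.main import dbtRunner, dbtUsageException

dbt = dbtRunner()
try:
    # warn_error and warn_error_options are mutually exclusive, whether they
    # come from the CLI, env vars, or the user config (profiles.yml).
    dbt.invoke(["--warn-error", "--warn-error-options", '{"include": "all"}', "deps"])
except dbtUsageException as exc:
    print(f"usage error, as expected: {exc}")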
    diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js index dcc633eb9a9..b068173aed8 100644 --- a/core/dbt/docs/build/html/searchindex.js +++ b/core/dbt/docs/build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"right": 0, "now": 0, "best": 0, "wai": 0, "from": 0, "i": 0, "us": 0, "dbtrunner": 0, "we": 0, "expos": 0, "cli": 0, "main": 0, "import": 0, "cli_arg": 0, "project": 0, "dir": 0, "jaffle_shop": 0, "initi": 0, "runner": 0, "re": 0, "success": 0, "you": 0, "can": 0, "also": 0, "pass": 0, "pre": 0, "construct": 0, "object": 0, "those": 0, "instead": 0, "load": 0, "up": 0, "disk": 0, "preload": 0, "load_profil": 0, "postgr": 0, "load_project": 0, "fals": 0, "thi": 0, "For": 0, "full": 0, "exampl": 0, "code": 0, "refer": 0, "py": 0, "type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "unknown": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "argument": 0, "provid": 0, "flag": 0, "even": 0, "exist": 0, "databas": 0, "current": 0, "environ": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "buildabl": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "thei": 0, "have": 0, "been": 0, "explicitli": 0, "string": 0, "which": 0, "overrid": 0, "dbt_project": 0, "yml": 0, "path": 0, "directori": 0, "look": 0, "file": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "includ": 0, "The": 0, "name": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "configur": 0, "onli": 0, "appli": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "avail": 0, "inform": 0, "skip": 0, "interact": 0, "setup": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "how": 0, "invok": 0, "command": 0, "python": 0, "runtim": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "favor_st": 0, "full_refresh": 0, "indirect_select": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "resource_typ": 0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "model": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "project_nam": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "macro": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": 
[[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "favor_state": [[0, "build|favor_state"], [0, "compile|favor_state"], [0, "run|favor_state"], [0, "snapshot|favor_state"], [0, "test|favor_state"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "resource_types": [[0, "build|resource_types"], [0, "list|resource_types"], [0, "list|resource_types"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, 
"run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "models": [[0, "compile|models"], [0, "list|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "project_name": [[0, "init|project_name"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"], [0, "dbt-section"]], "output": [[0, "list|output"], [0, "list|output"]], "output_keys": [[0, "list|output_keys"], [0, "list|output_keys"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "macro": [[0, "run-operation|macro"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file +Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"right": 0, "now": 0, "best": 0, "wai": 0, "from": 0, "i": 0, "us": 0, "dbtrunner": 0, "we": 0, "expos": 0, "cli": 0, "main": 0, "import": 0, "cli_arg": 0, "project": 0, "dir": 0, "jaffle_shop": 0, "initi": 0, "runner": 0, "re": 0, "success": 0, "you": 0, "can": 0, "also": 0, "pass": 0, "pre": 0, "construct": 0, "object": 0, "those": 0, "instead": 0, "load": 0, "up": 0, "disk": 0, "preload": 0, "load_profil": 0, "postgr": 0, "load_project": 0, "fals": 0, "thi": 0, "For": 0, "full": 0, "exampl": 0, "code": 0, "refer": 0, "py": 0, "type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "unknown": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "argument": 0, "provid": 0, "flag": 0, "even": 0, "exist": 0, "databas": 0, "current": 0, "environ": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "buildabl": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "thei": 0, "have": 0, "been": 0, "explicitli": 0, "string": 0, "which": 0, "overrid": 0, "dbt_project": 0, "yml": 0, "path": 0, "directori": 0, "look": 0, "file": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "includ": 0, "The": 0, "name": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "configur": 0, "onli": 0, "appli": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "avail": 0, "inform": 0, "skip": 0, "interact": 0, "setup": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, 
"document": 0, "how": 0, "invok": 0, "command": 0, "python": 0, "runtim": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "favor_st": 0, "full_refresh": 0, "indirect_select": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "resource_typ": 0, "select": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "project_nam": 0, "skip_profile_setup": 0, "list": 0, "model": 0, "output": 0, "output_kei": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "macro": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "How to invoke dbt commands in python runtime": [[0, "how-to-invoke-dbt-commands-in-python-runtime"]], "API documentation": [[0, "api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "favor_state": [[0, "build|favor_state"], [0, "compile|favor_state"], [0, "run|favor_state"], [0, "snapshot|favor_state"], [0, "test|favor_state"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "resource_types": [[0, "build|resource_types"], [0, "list|resource_types"], [0, "list|resource_types"]], "select": [[0, "build|select"], [0, "compile|select"], [0, "list|select"], [0, "list|select"], [0, "run|select"], [0, "seed|select"], [0, "snapshot|select"], [0, "test|select"]], "selector": [[0, "build|selector"], 
[0, "compile|selector"], [0, "list|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "project_name": [[0, "init|project_name"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"], [0, "dbt-section"]], "models": [[0, "list|models"], [0, "list|models"]], "output": [[0, "list|output"], [0, "list|output"]], "output_keys": [[0, "list|output_keys"], [0, "list|output_keys"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "macro": [[0, "run-operation|macro"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/tests/unit/test_cli.py b/tests/unit/test_cli.py index dca21fdf6de..4ed17583e69 100644 --- a/tests/unit/test_cli.py +++ b/tests/unit/test_cli.py @@ -1,8 +1,5 @@ -import ast -from inspect import getsource - import click -from dbt.cli import params + from dbt.cli.main import cli @@ -42,10 +39,3 @@ def run_test(command): run_test(command) run_test(cli) - - def test_params_are_alpha_sorted(self): - root_node = ast.parse(getsource(params)) - param_var_names = [ - node.targets[0].id for node in ast.walk(root_node) if isinstance(node, ast.Assign) - ] - assert param_var_names == sorted(param_var_names) From 59d773ea7e4e8cad09ac446239b66a68ddb4f9e6 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 31 Jan 2023 12:40:01 -0600 Subject: [PATCH 34/54] Implement --version in click (#6802) --- 
 .../Under the Hood-20230126-143102.yaml | 6 ++++++
 .github/workflows/main.yml | 4 +---
 core/dbt/cli/main.py | 17 +++++++++++------
 core/dbt/cli/params.py | 6 ------
 .../docs/build/doctrees/environment.pickle | Bin 207366 -> 207366 bytes
 tests/unit/test_cli_flags.py | 2 ++
 6 files changed, 20 insertions(+), 15 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230126-143102.yaml

diff --git a/.changes/unreleased/Under the Hood-20230126-143102.yaml b/.changes/unreleased/Under the Hood-20230126-143102.yaml
new file mode 100644
index 00000000000..505efabf53c
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230126-143102.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Implement --version for click cli
+time: 2023-01-26T14:31:02.740282-06:00
+custom:
+  Author: stu-k
+  Issue: "6757"
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 7f4e8607185..c8347f6b069 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -59,9 +59,7 @@ jobs:
           mypy --version
           python -m pip install -r requirements.txt
           python -m pip install -r dev-requirements.txt
-          # Running version disabled temporarily because version isn't implemented on this branch
-          # Please un-comment it when GH #6757 / CT-1926 is complete
-          #dbt --version
+          dbt --version
 
       - name: Run pre-commit hooks
         run: pre-commit run --all-files --show-diff-on-failure
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 8d257b55189..55ad5986952 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -1,5 +1,4 @@
 from copy import copy
-from pprint import pformat as pf  # This is temporary for RAT-ing
 from typing import List, Tuple, Optional
 
 import click
@@ -22,6 +21,7 @@
 from dbt.task.build import BuildTask
 from dbt.task.generate import GenerateTask
 from dbt.task.init import InitTask
+from dbt.version import __version__, get_version_information
 
 
 # CLI invocation
@@ -34,6 +34,10 @@ class dbtUsageException(Exception):
     pass
 
 
+class dbtInternalException(Exception):
+    pass
+
+
 # Programmatic invocation
 class dbtRunner:
     def __init__(
@@ -52,6 +56,11 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
                 "manifest": self.manifest,
             }
             return cli.invoke(dbt_ctx)
+        except click.exceptions.Exit as e:
+            # 0 exit code, expected for --version early exit
+            if str(e) == "0":
+                return [], True
+            raise dbtInternalException(f"unhandled exit code {str(e)}")
         except (click.NoSuchOption, click.UsageError) as e:
             raise dbtUsageException(e.message)
 
@@ -64,6 +73,7 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
     epilog="Specify one of these sub-commands and you can find more help from there.",
 )
 @click.pass_context
+@click.version_option(version=__version__, message=get_version_information())
 @p.anonymous_usage_stats
 @p.cache_selected_only
 @p.debug
@@ -82,7 +92,6 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
 @p.static_parser
 @p.use_colors
 @p.use_experimental_parser
-@p.version
 @p.version_check
 @p.warn_error
 @p.warn_error_options
@@ -91,10 +100,6 @@ def cli(ctx, **kwargs):
     """An ELT tool for managing your SQL transformations and data models.
     For more documentation on these commands, visit: docs.getdbt.com
     """
-    # Version info
-    if ctx.params["version"]:
-        click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}")
-        return
 
     # dbt build
diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
index 2dbb5ad511e..ab6f1a5a573 100644
--- a/core/dbt/cli/params.py
+++ b/core/dbt/cli/params.py
@@ -377,12 +377,6 @@
     default="{}",
 )
 
-version = click.option(
-    "--version",
-    envvar=None,
-    help="Show version information",
-    is_flag=True,
-)
 
 version_check = click.option(
     "--version-check/--no-version-check",
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index b63f39d16959d1fdc4ec850d79572950a3ffc140..3b93b3c26db1fbcdf4997300586117bd9e98404b 100644
GIT binary patch
delta 32
mcmZp>!qawzXG60*YxqM$KCb2t`SuQZMj&R|-XYIi#{~e(dkYr;

delta 32
mcmZp>!qawzXG60*YpC1monM
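Reviewer note: the `invoke` change above works because Click signals an eager early exit (such as `--version`) by raising `click.exceptions.Exit` with code 0 rather than returning a value. A minimal standalone sketch of that behavior (toy command, not code from this patch):

    import click

    @click.command()
    @click.version_option(version="0.0.0")
    def cli():
        pass

    try:
        # Eager params run during parsing, so make_context itself can raise Exit.
        ctx = cli.make_context("cli", ["--version"])
        cli.invoke(ctx)
    except click.exceptions.Exit as e:
        assert e.exit_code == 0  # the expected --version early exit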
Date: Tue, 31 Jan 2023 19:20:18 -0600
Subject: [PATCH 35/54] Lazily call --version (#6813)

* Lazily call --version

* Add generated CLI API docs

---------

Co-authored-by: Github Build Bot
---
 .../Under the Hood-20230131-141806.yaml | 6 +++++
 core/dbt/cli/main.py | 3 +--
 core/dbt/cli/params.py | 21 +++++++++++++++++-
 .../docs/build/doctrees/environment.pickle | Bin 207366 -> 207366 bytes
 4 files changed, 27 insertions(+), 3 deletions(-)
 create mode 100644 .changes/unreleased/Under the Hood-20230131-141806.yaml

diff --git a/.changes/unreleased/Under the Hood-20230131-141806.yaml b/.changes/unreleased/Under the Hood-20230131-141806.yaml
new file mode 100644
index 00000000000..7ebc38b8008
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230131-141806.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Lazily call --version
+time: 2023-01-31T14:18:06.02312-06:00
+custom:
+  Author: stu-k
+  Issue: "6812"
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 55ad5986952..bbe3b5f863f 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -21,7 +21,6 @@
 from dbt.task.build import BuildTask
 from dbt.task.generate import GenerateTask
 from dbt.task.init import InitTask
-from dbt.version import __version__, get_version_information
 
 
 # CLI invocation
@@ -73,7 +72,6 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
     epilog="Specify one of these sub-commands and you can find more help from there.",
 )
 @click.pass_context
-@click.version_option(version=__version__, message=get_version_information())
 @p.anonymous_usage_stats
 @p.cache_selected_only
 @p.debug
@@ -92,6 +90,7 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]:
 @p.static_parser
 @p.use_colors
 @p.use_experimental_parser
+@p.version
 @p.version_check
 @p.warn_error
 @p.warn_error_options
diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
index ab6f1a5a573..c741ece1de0 100644
--- a/core/dbt/cli/params.py
+++ b/core/dbt/cli/params.py
@@ -4,7 +4,7 @@
 from dbt.cli.options import MultiOption
 from dbt.cli.option_types import YAML, ChoiceTuple, WarnErrorOptionsType
 from dbt.cli.resolvers import default_project_dir, default_profiles_dir
-
+from dbt.version import get_version_information
 
 # TODO: The name (reflected in flags) is a correction!
 # The original name was `SEND_ANONYMOUS_USAGE_STATS` and used an env var called "DBT_SEND_ANONYMOUS_USAGE_STATS"
@@ -378,6 +378,25 @@
 )
+
+# TODO: when legacy flags are deprecated use
+# click.version_option instead of a callback
+def _version_callback(ctx, _param, value):
+    if not value or ctx.resilient_parsing:
+        return
+    click.echo(get_version_information())
+    ctx.exit()
+
+
+version = click.option(
+    "--version",
+    callback=_version_callback,
+    envvar=None,
+    expose_value=False,
+    help="Show version information",
+    is_eager=True,
+    is_flag=True,
+)
+
 version_check = click.option(
     "--version-check/--no-version-check",
     envvar="DBT_VERSION_CHECK",
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index 3b93b3c26db1fbcdf4997300586117bd9e98404b..a46e9721e0417b1b6fa6dc0488653542e23bf370 100644
GIT binary patch
delta 32
mcmZp>!qawzXG60*YaEY&uu^k}e0zsHBM>uf?~rG%;{pJ>7YaoH

delta 32
mcmZp>!qawzXG60*YxqM$KCb2t`SuQZMj&R|-XYIi#{~e(dkYr;
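Reviewer note: the callback-based option above defers computing the version information until the flag is actually passed, which `click.version_option`'s decorator-time `message=` argument could not. A standalone sketch of the same eager-callback pattern, using hypothetical names (not code from this patch):

    import click

    def _info_callback(ctx, _param, value):
        if not value or ctx.resilient_parsing:
            return  # flag not passed, or Click is doing resilient parsing
        click.echo(compute_info())  # only computed when --info is used
        ctx.exit()

    def compute_info() -> str:
        return "expensive info"  # stand-in for something like get_version_information()

    @click.command()
    @click.option("--info", callback=_info_callback, expose_value=False,
                  is_eager=True, is_flag=True, help="Show info and exit.")
    def cli():
        click.echo("normal run")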
From 05e53d4143655dc53cb9f8c0d970abd784d7e9bd Mon Sep 17 00:00:00 2001
From: Ian Knox <81931810+iknox-fa@users.noreply.github.com>
Date: Mon, 6 Feb 2023 10:49:40 -0600
Subject: [PATCH 36/54] Test fix: `TestProfileEnvVars::test_profile_env_vars`
 (#6856)

---
 .../partial_parsing/test_pp_vars.py | 46 ++++++++++---------
 1 file changed, 25 insertions(+), 21 deletions(-)

diff --git a/tests/functional/partial_parsing/test_pp_vars.py b/tests/functional/partial_parsing/test_pp_vars.py
index 19b3c7db849..47f12a55877 100644
--- a/tests/functional/partial_parsing/test_pp_vars.py
+++ b/tests/functional/partial_parsing/test_pp_vars.py
@@ -1,31 +1,29 @@
-import pytest
+import os
+from pathlib import Path
 
-from dbt.tests.util import run_dbt, write_file, run_dbt_and_capture, get_manifest
+import pytest
 
+from dbt.constants import SECRET_ENV_PREFIX
+from dbt.exceptions import FailedToConnectError, ParsingError
+from dbt.tests.util import get_manifest, run_dbt, run_dbt_and_capture, write_file
 from tests.functional.partial_parsing.fixtures import (
-    model_color_sql,
-    env_var_model_sql,
-    env_var_schema_yml,
-    env_var_model_one_sql,
-    raw_customers_csv,
-    env_var_sources_yml,
-    test_color_sql,
-    env_var_schema2_yml,
-    env_var_schema3_yml,
     env_var_macro_sql,
     env_var_macros_yml,
-    env_var_model_test_yml,
-    people_sql,
     env_var_metrics_yml,
+    env_var_model_one_sql,
+    env_var_model_sql,
+    env_var_model_test_yml,
+    env_var_schema2_yml,
+    env_var_schema3_yml,
+    env_var_schema_yml,
+    env_var_sources_yml,
+    model_color_sql,
     model_one_sql,
+    people_sql,
+    raw_customers_csv,
+    test_color_sql,
 )
-
-from dbt.exceptions import ParsingError
-from dbt.constants import SECRET_ENV_PREFIX
-import os
-
-
 os.environ["DBT_PP_TEST"] = "true"
@@ -325,14 +323,20 @@ def test_profile_env_vars(self, project):
 
         os.environ["ENV_VAR_USER"] = "root"
         os.environ["ENV_VAR_PASS"] = "password"
-        results = run_dbt(["run"])
+        run_dbt(["run"])
         manifest = get_manifest(project.project_root)
         env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
 
         # Change env_vars, the user doesn't exist, this should fail
        os.environ["ENV_VAR_USER"] = "fake_user"
-        (results, log_output) = run_dbt_and_capture(["run"], expect_pass=False)
+
+        # N.B. run_dbt_and_capture won't work here because FailedToConnectError ends the test entirely
+        with pytest.raises(FailedToConnectError):
+            run_dbt(["run"], expect_pass=False)
+
+        log_output = Path(project.project_root, "logs", "dbt.log").read_text()
         assert "env vars used in profiles.yml have changed" in log_output
+
         manifest = get_manifest(project.project_root)
         assert env_vars_checksum != manifest.state_check.profile_env_vars_hash.checksum

From e08eede5e22f6ead56eaa747a6d78e5a624ba9f3 Mon Sep 17 00:00:00 2001
From: Ian Knox <81931810+iknox-fa@users.noreply.github.com>
Date: Mon, 6 Feb 2023 15:24:09 -0600
Subject: [PATCH 37/54] Remove unused `cli_runner` (#6877)

---
 core/dbt/cli/main.py | 8 +-------
 .../docs/build/doctrees/environment.pickle | Bin 207366 -> 207366 bytes
 core/setup.py | 2 +-
 3 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index bbe3b5f863f..956f6bde81a 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -23,12 +23,6 @@
 from dbt.task.init import InitTask
 
 
-# CLI invocation
-def cli_runner():
-    # Run the cli
-    cli()
-
-
 class dbtUsageException(Exception):
     pass
@@ -620,4 +614,4 @@
 
 # Support running as a module
 if __name__ == "__main__":
-    cli_runner()
+    cli()
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index a46e9721e0417b1b6fa6dc0488653542e23bf370..c87b318b6fab80b3f97008bd2da703c29f149526 100644
GIT binary patch
delta 33
ncmZp>!qawzXG4=b+Yc|J+lPCbJLKCt!qawzXG4=b+qXC#0b!-)4*B*Dc}5^++TJ10T*n0f)Kd#P
diff --git a/core/setup.py b/core/setup.py
index f3b9c2017e4..148120d4891 100644
--- a/core/setup.py
+++ b/core/setup.py
@@ -43,7 +43,7 @@
     include_package_data=True,
     test_suite="test",
     entry_points={
-        "console_scripts": ["dbt = dbt.cli.main:cli_runner"],
+        "console_scripts": ["dbt = dbt.cli.main:cli"],
     },
     install_requires=[
         "Jinja2==3.1.2",

From 9c0b62b4f5b055f9f5ad28c6dcffce19d8f5c40c Mon Sep 17 00:00:00 2001
From: Kshitij Aranke
Date: Mon, 6 Feb 2023 15:13:40 -0800
Subject: [PATCH 38/54] Fix CLI vars test to check for object instead of
 string (#6850)

---
 tests/functional/context_methods/test_cli_vars.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/functional/context_methods/test_cli_vars.py b/tests/functional/context_methods/test_cli_vars.py
index 353d96d777b..5f5b222f5da 100644
--- a/tests/functional/context_methods/test_cli_vars.py
+++ b/tests/functional/context_methods/test_cli_vars.py
@@ -99,7 +99,7 @@ def test__cli_vars_longer(self, project):
         results = run_dbt(["test", "--vars", "{simple: abc, unused: def}"])
         assert len(results) == 1
         run_results = get_artifact(project.project_root, "target", "run_results.json")
-        assert run_results["args"]["vars"] == "{simple: abc, unused: def}"
+        assert run_results["args"]["vars"] == {"simple": "abc", "unused": "def"}
 
 
 class TestCLIVarsProfile:
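Reviewer note: the assertion change above reflects that the `--vars` string is parsed as YAML before being recorded in `run_results.json`, so the artifact carries a mapping rather than the raw string. A minimal illustration of the parse (assuming PyYAML semantics for the flow mapping; toy code, not from this patch):

    import yaml

    raw = "{simple: abc, unused: def}"
    assert yaml.safe_load(raw) == {"simple": "abc", "unused": "def"}  # dict, not str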
From d0b5d752df177f58f91fc081ca29f05732c0d1b0 Mon Sep 17 00:00:00 2001
From: Chenyu Li
Date: Tue, 7 Feb 2023 08:35:35 -0800
Subject: [PATCH 39/54] consolidate flags (#6788)

Co-authored-by: Michelle Ark
Co-authored-by: Github Build Bot
---
 core/dbt/adapters/cache.py | 5 +-
 core/dbt/cli/flags.py | 89 +-
 core/dbt/cli/main.py | 1 -
 core/dbt/cli/option_types.py | 1 +
 core/dbt/cli/params.py | 4 +-
 core/dbt/cli/requires.py | 2 +
 core/dbt/clients/jinja.py | 7 +-
 core/dbt/compilation.py | 3 +-
 core/dbt/config/profile.py | 43 +-
 core/dbt/config/project.py | 11 +-
 core/dbt/config/runtime.py | 6 +-
 core/dbt/config/utils.py | 61 +-
 core/dbt/context/base.py | 7 +-
 core/dbt/contracts/graph/manifest.py | 8 +-
 core/dbt/contracts/graph/nodes.py | 4 +-
 core/dbt/deps/registry.py | 4 +-
 .../docs/build/doctrees/environment.pickle | Bin 207366 -> 207366 bytes
 core/dbt/events/functions.py | 23 +-
 core/dbt/flags.py | 320 +----
 core/dbt/graph/cli.py | 5 +-
 core/dbt/lib.py | 3 +-
 core/dbt/parser/generic_test.py | 4 +-
 core/dbt/parser/macros.py | 4 +-
 core/dbt/parser/manifest.py | 7 +-
 core/dbt/parser/models.py | 4 +-
 core/dbt/task/debug.py | 7 +-
 core/dbt/task/init.py | 14 +-
 core/dbt/task/runnable.py | 10 +-
 core/dbt/task/test.py | 4 +-
 core/dbt/tests/util.py | 9 +
 core/dbt/ui.py | 6 +-
 core/dbt/version.py | 3 -
 pytest.ini | 1 -
 .../create_incremental__dbt_tmp.sql | 4 -
 .../035_docs_blocks_tests/docs/docs.md | 17 -
 .../duplicate_docs/docs.md | 7 -
 .../duplicate_docs/model.sql | 1 -
 .../duplicate_docs/schema.yml | 5 -
 .../invalid_name_models/docs.md | 12 -
 .../invalid_name_models/model.sql | 1 -
 .../invalid_name_models/schema.yml | 10 -
 .../missing_docs_models/docs.md | 7 -
 .../missing_docs_models/model.sql | 1 -
 .../missing_docs_models/schema.yml | 11 -
 .../035_docs_blocks_tests/models/docs.md | 17 -
 .../035_docs_blocks_tests/models/model.sql | 1 -
 .../035_docs_blocks_tests/models/schema.yml | 12 -
 .../038_caching_tests/models/model.sql | 6 -
 .../another_schema_model.sql | 7 -
 .../models_multi_schemas/model.sql | 6 -
 .../shouting_models/MODEL.sql | 6 -
 .../models/do_nothing_1.sql | 1 -
 .../models/do_nothing_2.sql | 1 -
 .../models/do_nothing_3.sql | 1 -
 .../test_target_path.py | 44 -
 test/integration/README.md | 1 -
 test/integration/__init__.py | 0
 test/integration/base.py | 1158 -----------------
 test/unit/test_config.py | 18 +-
 test/unit/test_flags.py | 2 +
 test/unit/test_graph.py | 6 +-
 test/unit/test_manifest.py | 6 +-
 .../configs/test_get_project_config.py | 35 -
 .../dependencies/test_local_dependency.py | 2 -
 tests/functional/profiles/test_profile_dir.py | 4 +-
 tests/unit/test_cli_flags.py | 7 +-
 tests/unit/test_deprecations.py | 7 +
 tests/unit/test_functions.py | 12 +-
 68 files changed, 311 insertions(+), 1805 deletions(-)
 delete mode 100644 test/integration/017_runtime_materialization_tests/create_incremental__dbt_tmp.sql
 delete mode 100644 test/integration/035_docs_blocks_tests/docs/docs.md
 delete mode 100644 test/integration/035_docs_blocks_tests/duplicate_docs/docs.md
 delete mode 100644 test/integration/035_docs_blocks_tests/duplicate_docs/model.sql
 delete mode 100644 test/integration/035_docs_blocks_tests/duplicate_docs/schema.yml
 delete mode 100644 test/integration/035_docs_blocks_tests/invalid_name_models/docs.md
 delete mode 100644 test/integration/035_docs_blocks_tests/invalid_name_models/model.sql
 delete mode 100644 test/integration/035_docs_blocks_tests/invalid_name_models/schema.yml
 delete mode 100644 test/integration/035_docs_blocks_tests/missing_docs_models/docs.md
 delete mode 100644 test/integration/035_docs_blocks_tests/missing_docs_models/model.sql
 delete mode 100644 test/integration/035_docs_blocks_tests/missing_docs_models/schema.yml
 delete mode 100644 test/integration/035_docs_blocks_tests/models/docs.md
 delete mode 100644 test/integration/035_docs_blocks_tests/models/model.sql
 delete mode 100644 test/integration/035_docs_blocks_tests/models/schema.yml
 delete mode 100644 test/integration/038_caching_tests/models/model.sql
 delete mode 100644 test/integration/038_caching_tests/models_multi_schemas/another_schema_model.sql
 delete mode 100644 test/integration/038_caching_tests/models_multi_schemas/model.sql
 delete mode 100644 test/integration/038_caching_tests/shouting_models/MODEL.sql
 delete mode 100644 test/integration/075_custom_target_path/models/do_nothing_1.sql
 delete mode 100644 test/integration/075_custom_target_path/models/do_nothing_2.sql
 delete mode 100644 test/integration/075_custom_target_path/models/do_nothing_3.sql
 delete mode 100644 test/integration/075_custom_target_path/test_target_path.py
 delete mode 100644 test/integration/README.md
 delete mode 100644 test/integration/__init__.py
 delete mode 100644 test/integration/base.py
 delete mode 100644 tests/functional/configs/test_get_project_config.py

diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py
index 24a0e469df1..dc8ff14e67e 100644
--- a/core/dbt/adapters/cache.py
+++ b/core/dbt/adapters/cache.py
@@ -17,7 +17,7 @@
 )
 from dbt.events.functions import fire_event, fire_event_if
 from dbt.events.types import CacheAction, CacheDumpGraph
-import dbt.flags as flags
+from dbt.flags import get_flags
 from dbt.utils import lowercase
@@ -319,6 +319,7 @@ def add(self, relation):
 
         :param BaseRelation relation: The underlying relation.
         """
+        flags = get_flags()
         cached = _CachedRelation(relation)
         fire_event_if(
             flags.LOG_CACHE_EVENTS,
@@ -456,7 +457,7 @@ def rename(self, old, new):
                 ref_key_2=_make_msg_from_ref_key(new),
             )
         )
-
+        flags = get_flags()
         fire_event_if(
             flags.LOG_CACHE_EVENTS,
             lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
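Reviewer note: the call sites above switch from binding module-level flag globals at import time to fetching the flags object at use time. A toy sketch of the `set_flags`/`get_flags` accessor pattern this relies on (the names come from this diff, but the body is a simplified stand-in, not dbt's actual implementation):

    from argparse import Namespace

    _FLAGS = None

    def set_flags(flags) -> None:
        global _FLAGS
        _FLAGS = flags

    def get_flags():
        if _FLAGS is None:
            raise RuntimeError("set_flags() has not been called yet")
        return _FLAGS

    # Values set after import are visible at every call site:
    set_flags(Namespace(LOG_CACHE_EVENTS=True))
    assert get_flags().LOG_CACHE_EVENTS  # read at call time, not import time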
diff --git a/core/dbt/cli/flags.py b/core/dbt/cli/flags.py
index dcfb59507c5..e6d15ced5ab 100644
--- a/core/dbt/cli/flags.py
+++ b/core/dbt/cli/flags.py
@@ -12,30 +12,85 @@
 from dbt.config.profile import read_user_config
 from dbt.contracts.project import UserConfig
+from dbt.helper_types import WarnErrorOptions
+from dbt.config.project import PartialProject
+from dbt.exceptions import DbtProjectError
 
 if os.name != "nt":
     # https://bugs.python.org/issue41567
     import multiprocessing.popen_spawn_posix  # type: ignore  # noqa: F401
 
+# TODO anything that has a default in params should be removed here?
+# Or maybe only the ones that are in the root click group
+FLAGS_DEFAULTS = {
+    "INDIRECT_SELECTION": "eager",
+    "TARGET_PATH": None,
+    # cli args without user_config or env var option
+    "FULL_REFRESH": False,
+    "STRICT_MODE": False,
+    "STORE_FAILURES": False,
+}
+
+
+# For backwards compatibility, some params are defined across multiple levels;
+# the top-level value should take precedence.
+# e.g. dbt --target-path path1 run --target-path path2
+EXPECTED_DUPLICATE_PARAMS = [
+    "full_refresh",
+    "target_path",
+    "version_check",
+    "fail_fast",
+    "indirect_selection",
+    "store_failures",
+]
+
+
+def convert_config(config_name, config_value):
+    # This function should take care of converting the values from config and original
+    # set_from_args to the correct type
+    ret = config_value
+    if config_name.lower() == "warn_error_options":
+        ret = WarnErrorOptions(
+            include=config_value.get("include", []), exclude=config_value.get("exclude", [])
+        )
+    return ret
+
 
 @dataclass(frozen=True)
 class Flags:
     def __init__(self, ctx: Context = None, user_config: UserConfig = None) -> None:
 
+        # set the default flags
+        for key, value in FLAGS_DEFAULTS.items():
+            object.__setattr__(self, key, value)
+
         if ctx is None:
             ctx = get_current_context()
 
         def assign_params(ctx, params_assigned_from_default):
             """Recursively adds all click params to flag object"""
             for param_name, param_value in ctx.params.items():
+                # TODO: this is to avoid duplicate params being defined in two places (version_check in run and cli)
+                # However this is a bit of a hack and we should find a better way to do this
+
                 # N.B. You have to use the base MRO method (object.__setattr__) to set attributes
                 # when using frozen dataclasses.
                 # https://docs.python.org/3/library/dataclasses.html#frozen-instances
-                if hasattr(self, param_name):
-                    raise Exception(f"Duplicate flag names found in click command: {param_name}")
-                object.__setattr__(self, param_name.upper(), param_value)
-                if ctx.get_parameter_source(param_name) == ParameterSource.DEFAULT:
-                    params_assigned_from_default.add(param_name)
+                if hasattr(self, param_name.upper()):
+                    if param_name not in EXPECTED_DUPLICATE_PARAMS:
+                        raise Exception(
+                            f"Duplicate flag names found in click command: {param_name}"
+                        )
+                    else:
+                        # Expected duplicate param from multi-level click command
+                        # (e.g. dbt --full-refresh run --full-refresh)
+                        # Overwrite user-configured param with value from parent context
+                        if ctx.get_parameter_source(param_name) != ParameterSource.DEFAULT:
+                            object.__setattr__(self, param_name.upper(), param_value)
+                else:
+                    object.__setattr__(self, param_name.upper(), param_value)
+                    if ctx.get_parameter_source(param_name) == ParameterSource.DEFAULT:
+                        params_assigned_from_default.add(param_name)
+
             if ctx.parent:
                 assign_params(ctx.parent, params_assigned_from_default)
@@ -64,7 +119,9 @@ def assign_params(ctx, params_assigned_from_default):
                 user_config_param_value = getattr(user_config, param_assigned_from_default, None)
                 if user_config_param_value is not None:
                     object.__setattr__(
-                        self, param_assigned_from_default.upper(), user_config_param_value
+                        self,
+                        param_assigned_from_default.upper(),
+                        convert_config(param_assigned_from_default, user_config_param_value),
                     )
                 param_assigned_from_default_copy.remove(param_assigned_from_default)
             params_assigned_from_default = param_assigned_from_default_copy
@@ -73,6 +130,26 @@
         object.__setattr__(self, "WHICH", invoked_subcommand_name or ctx.info_name)
         object.__setattr__(self, "MP_CONTEXT", get_context("spawn"))
 
+        # Default LOG_PATH from PROJECT_DIR, if available.
+        if getattr(self, "LOG_PATH", None) is None:
+            log_path = "logs"
+            project_dir = getattr(self, "PROJECT_DIR", None)
+            # If available, set LOG_PATH from log-path in dbt_project.yml
+            # Known limitations:
+            # 1. Using PartialProject here, so no jinja rendering of log-path.
+            # 2. Programmatic invocations of the cli via dbtRunner may pass a Project object directly,
+            #    which is not being used here to extract log-path.
+            if project_dir:
+                try:
+                    partial = PartialProject.from_project_root(
+                        project_dir, verify_version=getattr(self, "VERSION_CHECK", True)
+                    )
+                    log_path = str(partial.project_dict.get("log-path", log_path))
+                except DbtProjectError:
+                    pass
+
+            object.__setattr__(self, "LOG_PATH", log_path)
+
         # Support console DO NOT TRACK initiative
         object.__setattr__(
             self,
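Reviewer note: the duplicate-param handling above lets the same flag exist on both the root group and a subcommand, with a user-set top-level value winning over a subcommand default. A runnable toy of that precedence check (hypothetical command names, not code from this patch):

    import click
    from click.core import ParameterSource

    @click.group()
    @click.option("--full-refresh", is_flag=True)
    def app(full_refresh):
        pass

    @app.command()
    @click.option("--full-refresh", is_flag=True)
    @click.pass_context
    def run(ctx, full_refresh):
        parent = ctx.parent
        # Prefer the parent's value unless it was merely defaulted:
        if parent.get_parameter_source("full_refresh") != ParameterSource.DEFAULT:
            full_refresh = parent.params["full_refresh"]
        click.echo(f"full_refresh={full_refresh}")

    if __name__ == "__main__":
        app()  # e.g. `app --full-refresh run` -> full_refresh=True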
diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py
index 956f6bde81a..109bd839cd1 100644
--- a/core/dbt/cli/main.py
+++ b/core/dbt/cli/main.py
@@ -490,7 +490,6 @@ def seed(ctx, **kwargs):
         ctx.obj["runtime_config"],
         ctx.obj["manifest"],
     )
-
     results = task.run()
     success = task.interpret_results(results)
     return results, success
diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py
index 6149dd7d5ee..b5eea995624 100644
--- a/core/dbt/cli/option_types.py
+++ b/core/dbt/cli/option_types.py
@@ -27,6 +27,7 @@ class WarnErrorOptionsType(YAML):
     name = "WarnErrorOptionsType"
 
     def convert(self, value, param, ctx):
+        # this function is called by click to convert the param's raw value
        include_exclude = super().convert(value, param, ctx)
 
         return WarnErrorOptions(
diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py
index c741ece1de0..afe128bb480 100644
--- a/core/dbt/cli/params.py
+++ b/core/dbt/cli/params.py
@@ -132,7 +132,7 @@
     "--log-path",
     envvar="DBT_LOG_PATH",
     help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.",
-    default=lambda: Path.cwd() / "logs",
+    default=None,
     type=click.Path(resolve_path=True, path_type=Path),
 )
@@ -415,7 +415,7 @@
 warn_error_options = click.option(
     "--warn-error-options",
     envvar="DBT_WARN_ERROR_OPTIONS",
-    default=None,
+    default="{}",
     help="""If dbt would normally warn, instead raise an exception based on include/exclude configuration. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests. This argument should be a YAML string, with keys 'include' or 'exclude'. eg. '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'""",
     type=WarnErrorOptionsType(),
diff --git a/core/dbt/cli/requires.py b/core/dbt/cli/requires.py
index 74ec80c986a..eb45374db35 100644
--- a/core/dbt/cli/requires.py
+++ b/core/dbt/cli/requires.py
@@ -1,4 +1,5 @@
 from dbt.adapters.factory import adapter_management, register_adapter
+from dbt.flags import set_flags
 from dbt.cli.flags import Flags
 from dbt.config import RuntimeConfig
 from dbt.config.runtime import load_project, load_profile
@@ -21,6 +22,7 @@ def wrapper(*args, **kwargs):
         # Flags
         flags = Flags(ctx)
         ctx.obj["flags"] = flags
+        set_flags(flags)
 
         # Tracking
         initialize_from_flags(flags.ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR)
diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py
index e9dcb45017b..ecf668c11e5 100644
--- a/core/dbt/clients/jinja.py
+++ b/core/dbt/clients/jinja.py
@@ -40,7 +40,7 @@
     UndefinedCompilationError,
     UndefinedMacroError,
 )
-from dbt import flags
+from dbt.flags import get_flags
 from dbt.node_types import ModelLanguage
@@ -99,8 +99,9 @@ def _compile(self, source, filename):
         If the value is 'write', also write the files to disk.
         WARNING: This can write a ton of data if you aren't careful.
         """
-        if filename == "