diff --git a/.circleci/config.yml b/.circleci/config.yml index 15e367c1..c00bcdc8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,6 +10,12 @@ jobs: executor: python/default steps: - checkout + - run: + # ONLY UNTIL NEW RC OF UPSTREAM + name: TEMP install git + command: | + apt-get update + apt-get install -y git - python/install-packages: pkg-manager: pip - run: diff --git a/README.md b/README.md index 0d87794f..eb135fcc 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ sudo apt install unixodbc-dev The following is needed for every target definition for both SQL Server and Azure SQL. The sections below details how to connect to SQL Server and Azure SQL specifically. ``` -type: sqlserver +type: synapse driver: 'ODBC Driver 17 for SQL Server' (The ODBC Driver installed on your system) server: server-host-name or ip port: 1433 diff --git a/dbt/adapters/sqlserver/__init__.py b/dbt/adapters/sqlserver/__init__.py deleted file mode 100644 index c1b2c56b..00000000 --- a/dbt/adapters/sqlserver/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from dbt.adapters.sqlserver.connections import SQLServerConnectionManager -from dbt.adapters.sqlserver.connections import SQLServerCredentials -from dbt.adapters.sqlserver.impl import SQLServerAdapter - -from dbt.adapters.base import AdapterPlugin -from dbt.include import sqlserver - - -Plugin = AdapterPlugin( - adapter=SQLServerAdapter, - credentials=SQLServerCredentials, - include_path=sqlserver.PACKAGE_PATH, -) diff --git a/dbt/adapters/sqlserver/__version__.py b/dbt/adapters/sqlserver/__version__.py deleted file mode 100644 index 6a7765be..00000000 --- a/dbt/adapters/sqlserver/__version__.py +++ /dev/null @@ -1 +0,0 @@ -version = '0.18.1' \ No newline at end of file diff --git a/dbt/adapters/sqlserver/connections.py b/dbt/adapters/sqlserver/connections.py deleted file mode 100644 index bdc0f7fb..00000000 --- a/dbt/adapters/sqlserver/connections.py +++ /dev/null @@ -1,375 +0,0 @@ -from contextlib import contextmanager - -import pyodbc -import os -import time -import struct -from itertools import chain, repeat -from typing import Callable, Mapping - -import dbt.exceptions -from dbt.adapters.base import Credentials -from dbt.adapters.sql import SQLConnectionManager -from azure.core.credentials import AccessToken -from azure.identity import AzureCliCredential, DefaultAzureCredential - -from dbt.logger import GLOBAL_LOGGER as logger - -from dataclasses import dataclass -from typing import Optional - - -AZURE_CREDENTIAL_SCOPE = "https://database.windows.net//.default" - - -@dataclass -class SQLServerCredentials(Credentials): - driver: str - host: str - database: str - schema: str - port: Optional[int] = 1433 - UID: Optional[str] = None - PWD: Optional[str] = None - windows_login: Optional[bool] = False - tenant_id: Optional[str] = None - client_id: Optional[str] = None - client_secret: Optional[str] = None - # "sql", "ActiveDirectoryPassword" or "ActiveDirectoryInteractive", or - # "ServicePrincipal" - authentication: Optional[str] = "sql" - encrypt: Optional[bool] = True - trust_cert: Optional[bool] = True - - _ALIASES = { - "user": "UID", - "username": "UID", - "pass": "PWD", - "password": "PWD", - "server": "host", - "trusted_connection": "windows_login", - "auth": "authentication", - "app_id": "client_id", - "app_secret": "client_secret", - "TrustServerCertificate": "trust_cert", - } - - @property - def type(self): - return "sqlserver" - - def _connection_keys(self): - # return an iterator of keys to pretty-print in 'dbt debug' - # 
raise NotImplementedError - if self.windows_login is True: - self.authentication = "Windows Login" - - return ( - "server", - "database", - "schema", - "port", - "UID", - "client_id", - "authentication", - "encrypt", - "trust_cert" - ) - - -def convert_bytes_to_mswindows_byte_string(value: bytes) -> bytes: - """ - Convert bytes to a Microsoft windows byte string. - - Parameters - ---------- - value : bytes - The bytes. - - Returns - ------- - out : bytes - The Microsoft byte string. - """ - encoded_bytes = bytes(chain.from_iterable(zip(value, repeat(0)))) - return struct.pack(" bytes: - """ - Convert an access token to a Microsoft windows byte string. - - Parameters - ---------- - token : AccessToken - The token. - - Returns - ------- - out : bytes - The Microsoft byte string. - """ - value = bytes(token.token, "UTF-8") - return convert_bytes_to_mswindows_byte_string(value) - - -def get_cli_access_token(credentials: SQLServerCredentials) -> AccessToken: - """ - Get an Azure access token using the CLI credentials - - First login with: - - ```bash - az login - ``` - - Parameters - ---------- - credentials: SQLServerConnectionManager - The credentials. - - Returns - ------- - out : AccessToken - Access token. - """ - _ = credentials - token = AzureCliCredential().get_token(AZURE_CREDENTIAL_SCOPE) - return token - - -def get_sp_access_token(credentials: SQLServerCredentials) -> AccessToken: - """ - Get an Azure access token using the SP credentials. - - Parameters - ---------- - credentials : SQLServerCredentials - Credentials. - - Returns - ------- - out : AccessToken - The access token. - """ - # bc DefaultAzureCredential will look in env variables - os.environ["AZURE_TENANT_ID"] = credentials.tenant_id - os.environ["AZURE_CLIENT_ID"] = credentials.client_id - os.environ["AZURE_CLIENT_SECRET"] = credentials.client_secret - - token = DefaultAzureCredential().get_token(AZURE_CREDENTIAL_SCOPE) - return token - - -AZURE_AUTH_FUNCTION_TYPE = Callable[[SQLServerCredentials], AccessToken] -AZURE_AUTH_FUNCTIONS: Mapping[str, AZURE_AUTH_FUNCTION_TYPE] = { - "ServicePrincipal": get_sp_access_token, - "CLI": get_cli_access_token, -} - - -class SQLServerConnectionManager(SQLConnectionManager): - TYPE = "sqlserver" - TOKEN = None - - @contextmanager - def exception_handler(self, sql): - try: - yield - - except pyodbc.DatabaseError as e: - logger.debug("Database error: {}".format(str(e))) - - try: - # attempt to release the connection - self.release() - except pyodbc.Error: - logger.debug("Failed to release connection!") - pass - - raise dbt.exceptions.DatabaseException(str(e).strip()) from e - - except Exception as e: - logger.debug(f"Error running SQL: {sql}") - logger.debug("Rolling back transaction.") - self.release() - if isinstance(e, dbt.exceptions.RuntimeException): - # during a sql query, an internal to dbt exception was raised. - # this sounds a lot like a signal handler and probably has - # useful information, so raise it without modification. 
- raise - - raise dbt.exceptions.RuntimeException(e) - - @classmethod - def open(cls, connection): - - if connection.state == "open": - logger.debug("Connection is already open, skipping open.") - return connection - - credentials = connection.credentials - - try: - con_str = [] - con_str.append(f"DRIVER={{{credentials.driver}}}") - - if "\\" in credentials.host: - # if there is a backslash \ in the host name the host is a sql-server named instance - # in this case then port number has to be omitted - con_str.append(f"SERVER={credentials.host}") - else: - con_str.append(f"SERVER={credentials.host},{credentials.port}") - - con_str.append(f"Database={credentials.database}") - - type_auth = getattr(credentials, "authentication", "sql") - - if "ActiveDirectory" in type_auth: - con_str.append(f"Authentication={credentials.authentication}") - - if type_auth == "ActiveDirectoryPassword": - con_str.append(f"UID={{{credentials.UID}}}") - con_str.append(f"PWD={{{credentials.PWD}}}") - elif type_auth == "ActiveDirectoryInteractive": - con_str.append(f"UID={{{credentials.UID}}}") - elif type_auth == "ActiveDirectoryIntegrated": - # why is this necessary??? - con_str.remove("UID={None}") - elif type_auth == "ActiveDirectoryMsi": - raise ValueError("ActiveDirectoryMsi is not supported yet") - - elif type_auth == "ServicePrincipal": - app_id = getattr(credentials, "AppId", None) - app_secret = getattr(credentials, "AppSecret", None) - - elif getattr(credentials, "windows_login", False): - con_str.append(f"trusted_connection=yes") - elif type_auth == "sql": - #con_str.append("Authentication=SqlPassword") - con_str.append(f"UID={{{credentials.UID}}}") - con_str.append(f"PWD={{{credentials.PWD}}}") - - # still confused whether to use "Yes", "yes", "True", or "true" - # to learn more visit - # https://docs.microsoft.com/en-us/sql/relational-databases/native-client/features/using-encryption-without-validation?view=sql-server-ver15 - if getattr(credentials, "encrypt", False) is True: - con_str.append(f"Encrypt=Yes") - if getattr(credentials, "trust_cert", False) is True: - con_str.append(f"TrustServerCertificate=Yes") - - con_str_concat = ';'.join(con_str) - - index = [] - for i, elem in enumerate(con_str): - if 'pwd=' in elem.lower(): - index.append(i) - - if len(index) !=0 : - con_str[index[0]]="PWD=***" - - con_str_display = ';'.join(con_str) - - logger.debug(f'Using connection string: {con_str_display}') - - if type_auth in AZURE_AUTH_FUNCTIONS.keys(): - # create token if it does not exist - if cls.TOKEN is None: - azure_auth_function = AZURE_AUTH_FUNCTIONS[type_auth] - token = azure_auth_function(credentials) - cls.TOKEN = convert_access_token_to_mswindows_byte_string( - token - ) - - # Source: - # https://docs.microsoft.com/en-us/sql/connect/odbc/using-azure-active-directory?view=sql-server-ver15#authenticating-with-an-access-token - SQL_COPT_SS_ACCESS_TOKEN = 1256 - - attrs_before = {SQL_COPT_SS_ACCESS_TOKEN: cls.TOKEN} - else: - attrs_before = {} - - handle = pyodbc.connect( - con_str_concat, - attrs_before=attrs_before, - autocommit=True, - ) - - connection.state = "open" - connection.handle = handle - logger.debug(f"Connected to db: {credentials.database}") - - except pyodbc.Error as e: - logger.debug(f"Could not connect to db: {e}") - - connection.handle = None - connection.state = "fail" - - raise dbt.exceptions.FailedToConnectException(str(e)) - - return connection - - def cancel(self, connection): - logger.debug("Cancel query") - pass - - def add_begin_query(self): - # return 
self.add_query('BEGIN TRANSACTION', auto_begin=False) - pass - - def add_commit_query(self): - # return self.add_query('COMMIT TRANSACTION', auto_begin=False) - pass - - def add_query(self, sql, auto_begin=True, bindings=None, abridge_sql_log=False): - - connection = self.get_thread_connection() - - if auto_begin and connection.transaction_open is False: - self.begin() - - logger.debug('Using {} connection "{}".'.format(self.TYPE, connection.name)) - - with self.exception_handler(sql): - if abridge_sql_log: - logger.debug("On {}: {}....".format(connection.name, sql[0:512])) - else: - logger.debug("On {}: {}".format(connection.name, sql)) - pre = time.time() - - cursor = connection.handle.cursor() - - # pyodbc does not handle a None type binding! - if bindings is None: - cursor.execute(sql) - else: - cursor.execute(sql, bindings) - - logger.debug( - "SQL status: {} in {:0.2f} seconds".format( - self.get_status(cursor), (time.time() - pre) - ) - ) - - return connection, cursor - - @classmethod - def get_credentials(cls, credentials): - return credentials - - @classmethod - def get_status(cls, cursor): - if cursor.rowcount == -1: - status = "OK" - else: - status = str(cursor.rowcount) - return status - - def execute(self, sql, auto_begin=True, fetch=False): - _, cursor = self.add_query(sql, auto_begin) - status = self.get_status(cursor) - if fetch: - table = self.get_result_from_cursor(cursor) - else: - table = dbt.clients.agate_helper.empty_table() - return status, table diff --git a/dbt/adapters/sqlserver/impl.py b/dbt/adapters/sqlserver/impl.py deleted file mode 100644 index 8cab4554..00000000 --- a/dbt/adapters/sqlserver/impl.py +++ /dev/null @@ -1,128 +0,0 @@ -from dbt.adapters.sql import SQLAdapter -from dbt.adapters.sqlserver import SQLServerConnectionManager -from dbt.adapters.base.relation import BaseRelation -import agate -from typing import ( - Optional, Tuple, Callable, Iterable, Type, Dict, Any, List, Mapping, - Iterator, Union, Set -) - - -class SQLServerAdapter(SQLAdapter): - ConnectionManager = SQLServerConnectionManager - - @classmethod - def date_function(cls): - return "getdate()" - - @classmethod - def convert_text_type(cls, agate_table, col_idx): - column = agate_table.columns[col_idx] - lens = (len(d.encode("utf-8")) for d in column.values_without_nulls()) - max_len = max(lens) if lens else 64 - length = max_len if max_len > 16 else 16 - return "varchar({})".format(length) - - @classmethod - def convert_datetime_type(cls, agate_table, col_idx): - return "datetime" - - @classmethod - def convert_boolean_type(cls, agate_table, col_idx): - return "bit" - - @classmethod - def convert_number_type(cls, agate_table, col_idx): - decimals = agate_table.aggregate(agate.MaxPrecision(col_idx)) - return "float" if decimals else "int" - - @classmethod - def convert_time_type(cls, agate_table, col_idx): - return "datetime" - - # Methods used in adapter tests - def timestamp_add_sql( - self, add_to: str, number: int = 1, interval: str = "hour" - ) -> str: - # note: 'interval' is not supported for T-SQL - # for backwards compatibility, we're compelled to set some sort of - # default. A lot of searching has lead me to believe that the - # '+ interval' syntax used in postgres/redshift is relatively common - # and might even be the SQL standard's intention. 
- return f"DATEADD({interval},{number},{add_to})" - - def string_add_sql( - self, add_to: str, value: str, location='append', - ) -> str: - """ - `+` is T-SQL's string concatenation operator - """ - if location == 'append': - return f"{add_to} + '{value}'" - elif location == 'prepend': - return f"'{value}' + {add_to}" - else: - raise RuntimeException( - f'Got an unexpected location value of "{location}"' - ) - - def get_rows_different_sql( - self, - relation_a: BaseRelation, - relation_b: BaseRelation, - column_names: Optional[List[str]] = None, - except_operator: str = "EXCEPT", - ) -> str: - - """ - note: using is not supported on Synapse so COLUMNS_EQUAL_SQL is adjsuted - Generate SQL for a query that returns a single row with a two - columns: the number of rows that are different between the two - relations and the number of mismatched rows. - """ - # This method only really exists for test reasons. - names: List[str] - if column_names is None: - columns = self.get_columns_in_relation(relation_a) - names = sorted((self.quote(c.name) for c in columns)) - else: - names = sorted((self.quote(n) for n in column_names)) - columns_csv = ", ".join(names) - - sql = COLUMNS_EQUAL_SQL.format( - columns=columns_csv, - relation_a=str(relation_a), - relation_b=str(relation_b), - except_op=except_operator, - ) - - return sql - - -COLUMNS_EQUAL_SQL = """ -with diff_count as ( - SELECT - 1 as id, - COUNT(*) as num_missing FROM ( - (SELECT {columns} FROM {relation_a} {except_op} - SELECT {columns} FROM {relation_b}) - UNION ALL - (SELECT {columns} FROM {relation_b} {except_op} - SELECT {columns} FROM {relation_a}) - ) as a -), table_a as ( - SELECT COUNT(*) as num_rows FROM {relation_a} -), table_b as ( - SELECT COUNT(*) as num_rows FROM {relation_b} -), row_count_diff as ( - select - 1 as id, - table_a.num_rows - table_b.num_rows as difference - from table_a, table_b -) -select - row_count_diff.difference as row_count_difference, - diff_count.num_missing as num_mismatched -from row_count_diff -join diff_count on row_count_diff.id = diff_count.id -""".strip() diff --git a/dbt/adapters/synapse/__init__.py b/dbt/adapters/synapse/__init__.py new file mode 100644 index 00000000..66b00483 --- /dev/null +++ b/dbt/adapters/synapse/__init__.py @@ -0,0 +1,14 @@ +from dbt.adapters.synapse.connections import SynapseConnectionManager +from dbt.adapters.synapse.connections import SynapseCredentials +from dbt.adapters.synapse.impl import SynapseAdapter + +from dbt.adapters.base import AdapterPlugin +from dbt.include import synapse + + +Plugin = AdapterPlugin( + adapter=SynapseAdapter, + credentials=SynapseCredentials, + include_path=synapse.PACKAGE_PATH, + dependencies=['sqlserver'] +) diff --git a/dbt/adapters/synapse/__version__.py b/dbt/adapters/synapse/__version__.py new file mode 100644 index 00000000..f3bd5fd8 --- /dev/null +++ b/dbt/adapters/synapse/__version__.py @@ -0,0 +1 @@ +version = '0.18.1' diff --git a/dbt/adapters/synapse/connections.py b/dbt/adapters/synapse/connections.py new file mode 100644 index 00000000..82715163 --- /dev/null +++ b/dbt/adapters/synapse/connections.py @@ -0,0 +1,18 @@ + +from dataclasses import dataclass + +from dbt.adapters.sqlserver import (SQLServerConnectionManager, + SQLServerCredentials) + + +@dataclass +class SynapseCredentials(SQLServerCredentials): + + @property + def type(self): + return "synapse" + +class SynapseConnectionManager(SQLServerConnectionManager): + TYPE = "synapse" + TOKEN = None + diff --git a/dbt/adapters/synapse/impl.py b/dbt/adapters/synapse/impl.py 
new file mode 100644 index 00000000..73d6d563 --- /dev/null +++ b/dbt/adapters/synapse/impl.py @@ -0,0 +1,7 @@ +from dbt.adapters.sqlserver import SQLServerAdapter +from dbt.adapters.synapse import SynapseConnectionManager + + + +class SynapseAdapter(SQLServerAdapter): + ConnectionManager = SynapseConnectionManager \ No newline at end of file diff --git a/dbt/include/sqlserver/macros/adapters.sql b/dbt/include/sqlserver/macros/adapters.sql deleted file mode 100644 index e28744f2..00000000 --- a/dbt/include/sqlserver/macros/adapters.sql +++ /dev/null @@ -1,208 +0,0 @@ -{% macro sqlserver__information_schema_name(database) -%} - information_schema -{%- endmacro %} - -{% macro sqlserver__get_columns_in_query(select_sql) %} - {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%} - select TOP 0 * from ( - {{ select_sql }} - ) as __dbt_sbq - {% endcall %} - {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }} -{% endmacro %} - -{% macro sqlserver__list_relations_without_caching(schema_relation) %} - {% call statement('list_relations_without_caching', fetch_result=True) -%} - select - table_catalog as [database], - table_name as [name], - table_schema as [schema], - case when table_type = 'BASE TABLE' then 'table' - when table_type = 'VIEW' then 'view' - else table_type - end as table_type - from information_schema.tables - where table_schema like '{{ schema_relation.schema }}' - and table_catalog like '{{ schema_relation.database }}' - {% endcall %} - {{ return(load_result('list_relations_without_caching').table) }} -{% endmacro %} - -{% macro sqlserver__list_schemas(database) %} - {% call statement('list_schemas', fetch_result=True, auto_begin=False) -%} - select name as [schema] - from sys.schemas - {% endcall %} - {{ return(load_result('list_schemas').table) }} -{% endmacro %} - -{% macro sqlserver__create_schema(relation) -%} - {% call statement('create_schema') -%} - IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = '{{ relation.without_identifier().schema }}') - BEGIN - EXEC('CREATE SCHEMA {{ relation.without_identifier().schema }}') - END - {% endcall %} -{% endmacro %} - -{% macro sqlserver__drop_schema(relation) -%} - {%- set tables_in_schema_query %} - SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES - WHERE TABLE_SCHEMA = '{{ relation.schema }}' - {% endset %} - {% set tables_to_drop = run_query(tables_in_schema_query).columns[0].values() %} - {% for table in tables_to_drop %} - {%- set schema_relation = adapter.get_relation(database=relation.database, - schema=relation.schema, - identifier=table) -%} - {% do drop_relation(schema_relation) %} - {%- endfor %} - - {% call statement('drop_schema') -%} - IF EXISTS (SELECT * FROM sys.schemas WHERE name = '{{ relation.schema }}') - BEGIN - EXEC('DROP SCHEMA {{ relation.schema }}') - END - {% endcall %} -{% endmacro %} - -{# TODO make this function just a wrapper of sqlserver__drop_relation_script #} -{% macro sqlserver__drop_relation(relation) -%} - {% if relation.type == 'view' -%} - {% set object_id_type = 'V' %} - {% elif relation.type == 'table'%} - {% set object_id_type = 'U' %} - {%- else -%} invalid target name - {% endif %} - {% call statement('drop_relation', auto_begin=False) -%} - if object_id ('{{ relation.include(database=False) }}','{{ object_id_type }}') is not null - begin - drop {{ relation.type }} {{ relation.include(database=False) }} - end - {%- endcall %} -{% endmacro %} - -{% macro sqlserver__drop_relation_script(relation) -%} - {% if 
relation.type == 'view' -%} - {% set object_id_type = 'V' %} - {% elif relation.type == 'table'%} - {% set object_id_type = 'U' %} - {%- else -%} invalid target name - {% endif %} - if object_id ('{{ relation.include(database=False) }}','{{ object_id_type }}') is not null - begin - drop {{ relation.type }} {{ relation.include(database=False) }} - end -{% endmacro %} - -{% macro sqlserver__check_schema_exists(information_schema, schema) -%} - {% call statement('check_schema_exists', fetch_result=True, auto_begin=False) -%} - SELECT count(*) as schema_exist FROM sys.schemas WHERE name = '{{ schema }}' - {%- endcall %} - {{ return(load_result('check_schema_exists').table) }} -{% endmacro %} - -{% macro sqlserver__create_view_as(relation, sql) -%} - create view {{ relation.include(database=False) }} as - {{ sql }} -{% endmacro %} - -{# TODO Actually Implement the rename index piece #} -{# TODO instead of deleting it... #} -{% macro sqlserver__rename_relation(from_relation, to_relation) -%} - {% call statement('rename_relation') -%} - - rename object {{ from_relation.include(database=False) }} to {{ to_relation.identifier }} - {%- endcall %} -{% endmacro %} - -{% macro sqlserver__create_clustered_columnstore_index(relation) -%} - {%- set cci_name = relation.schema ~ '_' ~ relation.identifier ~ '_cci' -%} - {%- set relation_name = relation.schema ~ '_' ~ relation.identifier -%} - {%- set full_relation = relation.schema ~ '.' ~ relation.identifier -%} - if object_id ('{{relation_name}}.{{cci_name}}','U') is not null - begin - drop index {{relation_name}}.{{cci_name}} - end - - CREATE CLUSTERED COLUMNSTORE INDEX {{cci_name}} - ON {{full_relation}} -{% endmacro %} - -{% macro sqlserver__create_table_as(temporary, relation, sql) -%} - {%- set index = config.get('index', default="CLUSTERED COLUMNSTORE INDEX") -%} - {%- set dist = config.get('dist', default="ROUND_ROBIN") -%} - {% set tmp_relation = relation.incorporate( - path={"identifier": relation.identifier.replace("#", "") ~ '_temp_view'}, - type='view')-%} - {%- set temp_view_sql = sql.replace("'", "''") -%} - - {{ sqlserver__drop_relation_script(tmp_relation) }} - - {{ sqlserver__drop_relation_script(relation) }} - - EXEC('create view {{ tmp_relation.schema }}.{{ tmp_relation.identifier }} as - {{ temp_view_sql }} - '); - - CREATE TABLE {{ relation.include(database=False) }} - WITH( - DISTRIBUTION = {{dist}}, - {{index}} - ) - AS (SELECT * FROM {{ tmp_relation.schema }}.{{ tmp_relation.identifier }}) - - {{ sqlserver__drop_relation_script(tmp_relation) }} - -{% endmacro %} - -{% macro sqlserver__insert_into_from(to_relation, from_relation) -%} - {%- set full_to_relation = to_relation.schema ~ '.' ~ to_relation.identifier -%} - {%- set full_from_relation = from_relation.schema ~ '.' 
~ from_relation.identifier -%} - - SELECT * INTO {{full_to_relation}} FROM {{full_from_relation}} - -{% endmacro %} - -{% macro sqlserver__current_timestamp() -%} - getdate() -{%- endmacro %} - -{% macro sqlserver__get_columns_in_relation(relation) -%} - {% call statement('get_columns_in_relation', fetch_result=True) %} - SELECT - column_name, - data_type, - character_maximum_length, - numeric_precision, - numeric_scale - FROM - (select - ordinal_position, - column_name, - data_type, - character_maximum_length, - numeric_precision, - numeric_scale - from INFORMATION_SCHEMA.COLUMNS - where table_name = '{{ relation.identifier }}' - and table_schema = '{{ relation.schema }}') cols - - - {% endcall %} - {% set table = load_result('get_columns_in_relation').table %} - {{ return(sql_convert_columns_in_relation(table)) }} -{% endmacro %} - -{% macro sqlserver__make_temp_relation(base_relation, suffix) %} - {% set tmp_identifier = '#' ~ base_relation.identifier ~ suffix %} - {% set tmp_relation = base_relation.incorporate( - path={"identifier": tmp_identifier}) -%} - - {% do return(tmp_relation) %} -{% endmacro %} - -{% macro sqlserver__snapshot_string_as_time(timestamp) -%} - {%- set result = "CONVERT(DATETIME2, '" ~ timestamp ~ "')" -%} - {{ return(result) }} -{%- endmacro %} \ No newline at end of file diff --git a/dbt/include/sqlserver/macros/catalog.sql b/dbt/include/sqlserver/macros/catalog.sql deleted file mode 100644 index dd871787..00000000 --- a/dbt/include/sqlserver/macros/catalog.sql +++ /dev/null @@ -1,51 +0,0 @@ - -{% macro sqlserver__get_catalog(information_schemas, schemas) -%} - - {%- call statement('catalog', fetch_result=True) -%} - - with tabs as ( - select - TABLE_CATALOG as table_database, - TABLE_SCHEMA as table_schema, - TABLE_NAME as table_name, - TABLE_TYPE as table_type, - TABLE_SCHEMA as table_owner, - null as table_comment - from INFORMATION_SCHEMA.TABLES - ), - - cols as ( - select - table_catalog as table_database, - table_schema, - table_name, - column_name, - ordinal_position as column_index, - data_type as column_type, - null as column_comment - from information_schema.columns - ) - - select - tabs.table_database, - tabs.table_schema, - tabs.table_name, - tabs.table_type, - tabs.table_comment, - tabs.table_owner, - cols.column_name, - cols.column_index, - cols.column_type, - cols.column_comment - from tabs - join cols on - tabs.table_database = cols.table_database - and tabs.table_schema = cols.table_schema - and tabs.table_name = cols.table_name - order by column_index - - {%- endcall -%} - - {{ return(load_result('catalog').table) }} - -{%- endmacro %} diff --git a/dbt/include/sqlserver/macros/materializations/snapshot/strategies.sql b/dbt/include/sqlserver/macros/materializations/snapshot/strategies.sql deleted file mode 100644 index 812f63fc..00000000 --- a/dbt/include/sqlserver/macros/materializations/snapshot/strategies.sql +++ /dev/null @@ -1,44 +0,0 @@ -{% macro sqlserver__snapshot_hash_arguments(args) %} - CONVERT(VARCHAR(32), HashBytes('MD5', {% for arg in args %} - coalesce(CONVERT(varchar, {{ arg }}, 121), '') {% if not loop.last %} + '|' + {% endif %} - {% endfor %}), 2) -{% endmacro %} - -{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %} - {% set check_cols_config = config['check_cols'] %} - {% set primary_key = config['unique_key'] %} - {% set updated_at = snapshot_get_time() %} - - {% if check_cols_config == 'all' %} - {% set check_cols = get_columns_in_query(node['injected_sql']) %} - {% elif 
check_cols_config is iterable and (check_cols_config | length) > 0 %} - {% set check_cols = check_cols_config %} - {% else %} - {% do exceptions.raise_compiler_error("Invalid value for 'check_cols': " ~ check_cols_config) %} - {% endif %} - - {% set row_changed_expr -%} - ( - {% for col in check_cols %} - {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }} - or - ( - (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ current_rel }}.{{ col }} is null)) - or - ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null)) - ) - {%- if not loop.last %} or {% endif %} - - {% endfor %} - ) - {%- endset %} - - {% set scd_id_expr = sqlserver__snapshot_hash_arguments([primary_key, updated_at]) %} - - {% do return({ - "unique_key": primary_key, - "updated_at": updated_at, - "row_changed": row_changed_expr, - "scd_id": scd_id_expr - }) %} -{% endmacro %} \ No newline at end of file diff --git a/dbt/include/sqlserver/__init__.py b/dbt/include/synapse/__init__.py similarity index 100% rename from dbt/include/sqlserver/__init__.py rename to dbt/include/synapse/__init__.py diff --git a/dbt/include/sqlserver/dbt_project.yml b/dbt/include/synapse/dbt_project.yml similarity index 74% rename from dbt/include/sqlserver/dbt_project.yml rename to dbt/include/synapse/dbt_project.yml index 08aa128f..5cd1fd8c 100644 --- a/dbt/include/sqlserver/dbt_project.yml +++ b/dbt/include/synapse/dbt_project.yml @@ -1,5 +1,5 @@ -name: dbt_sqlserver +name: dbt_synapse version: 1.0 config-version: 2 diff --git a/dbt/include/synapse/macros/adapters.sql b/dbt/include/synapse/macros/adapters.sql new file mode 100644 index 00000000..a9b00543 --- /dev/null +++ b/dbt/include/synapse/macros/adapters.sql @@ -0,0 +1,158 @@ +{% macro synapse__information_schema_name(database) -%} + {{ return(sqlserver__information_schema_name(database)) }} +{%- endmacro %} + +{% macro synapse__get_columns_in_query(select_sql) %} + {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%} + select TOP 0 * from ( + {{ select_sql }} + ) as __dbt_sbq + {% endcall %} + {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }} +{% endmacro %} + +{% macro synapse__list_relations_without_caching(schema_relation) %} + {{ return(sqlserver__list_relations_without_caching(schema_relation)) }} +{% endmacro %} + +{% macro synapse__list_schemas(database) %} + {{ return(sqlserver__list_schemas(database)) }} +{% endmacro %} + +{% macro synapse__create_schema(relation) -%} + {% call statement('create_schema') -%} + IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = '{{ relation.without_identifier().schema }}') + BEGIN + EXEC('CREATE SCHEMA {{ relation.without_identifier().schema }}') + END + {% endcall %} +{% endmacro %} + +{% macro synapse__drop_schema(relation) -%} + {%- set tables_in_schema_query %} + SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES + WHERE TABLE_SCHEMA = '{{ relation.schema }}' + {% endset %} + {% set tables_to_drop = run_query(tables_in_schema_query).columns[0].values() %} + {% for table in tables_to_drop %} + {%- set schema_relation = adapter.get_relation(database=relation.database, + schema=relation.schema, + identifier=table) -%} + {% do drop_relation(schema_relation) %} + {%- endfor %} + + {% call statement('drop_schema') -%} + IF EXISTS (SELECT * FROM sys.schemas WHERE name = '{{ relation.schema }}') + BEGIN + EXEC('DROP SCHEMA {{ relation.schema }}') + END {% endcall %} +{% endmacro %} + +{# TODO make this function just a wrapper of 
synapse__drop_relation_script #} +{% macro synapse__drop_relation(relation) -%} + {{ return(sqlserver__drop_relation(relation)) }} +{% endmacro %} + +{% macro synapse__drop_relation_script(relation) -%} + {{ return(sqlserver__drop_relation_script(relation)) }} +{% endmacro %} + +{% macro synapse__check_schema_exists(information_schema, schema) -%} + {{ return(sqlserver__check_schema_exists(information_schema, schema)) }} +{% endmacro %} + +{% macro synapse__create_view_as(relation, sql) -%} + create view {{ relation.include(database=False) }} as + {{ sql }} +{% endmacro %} + +{# TODO Actually Implement the rename index piece #} +{# TODO instead of deleting it... #} +{% macro synapse__rename_relation(from_relation, to_relation) -%} + {% call statement('rename_relation') -%} + + rename object {{ from_relation.include(database=False) }} to {{ to_relation.identifier }} + {%- endcall %} +{% endmacro %} + +{% macro synapse__create_clustered_columnstore_index(relation) -%} + {%- set cci_name = relation.schema ~ '_' ~ relation.identifier ~ '_cci' -%} + {%- set relation_name = relation.schema ~ '_' ~ relation.identifier -%} + {%- set full_relation = relation.schema ~ '.' ~ relation.identifier -%} + if object_id ('{{relation_name}}.{{cci_name}}','U') is not null + begin + drop index {{relation_name}}.{{cci_name}} + end + + CREATE CLUSTERED COLUMNSTORE INDEX {{cci_name}} + ON {{full_relation}} +{% endmacro %} + +{% macro synapse__create_table_as(temporary, relation, sql) -%} + {%- set index = config.get('index', default="CLUSTERED COLUMNSTORE INDEX") -%} + {%- set dist = config.get('dist', default="ROUND_ROBIN") -%} + {% set tmp_relation = relation.incorporate( + path={"identifier": relation.identifier.replace("#", "") ~ '_temp_view'}, + type='view')-%} + {%- set temp_view_sql = sql.replace("'", "''") -%} + + {{ synapse__drop_relation_script(tmp_relation) }} + + {{ synapse__drop_relation_script(relation) }} + + EXEC('create view {{ tmp_relation.schema }}.{{ tmp_relation.identifier }} as + {{ temp_view_sql }} + '); + + CREATE TABLE {{ relation.include(database=False) }} + WITH( + DISTRIBUTION = {{dist}}, + {{index}} + ) + AS (SELECT * FROM {{ tmp_relation.schema }}.{{ tmp_relation.identifier }}) + + {{ synapse__drop_relation_script(tmp_relation) }} + +{% endmacro %} + +{% macro synapse__insert_into_from(to_relation, from_relation) -%} + {{ return(sqlserver__insert_into_from(to_relation, from_relation)) }} +{% endmacro %} + +{% macro synapse__current_timestamp() -%} + {{ return(sqlserver__current_timestamp()) }} +{%- endmacro %} + +{% macro synapse__get_columns_in_relation(relation) -%} + {% call statement('get_columns_in_relation', fetch_result=True) %} + SELECT + column_name, + data_type, + character_maximum_length, + numeric_precision, + numeric_scale + FROM + (select + ordinal_position, + column_name, + data_type, + character_maximum_length, + numeric_precision, + numeric_scale + from INFORMATION_SCHEMA.COLUMNS + where table_name = '{{ relation.identifier }}' + and table_schema = '{{ relation.schema }}') cols + + + {% endcall %} + {% set table = load_result('get_columns_in_relation').table %} + {{ return(sql_convert_columns_in_relation(table)) }} +{% endmacro %} + +{% macro synapse__make_temp_relation(base_relation, suffix) %} + {{ return(sqlserver__make_temp_relation(base_relation, suffix)) }} +{% endmacro %} + +{% macro synapse__snapshot_string_as_time(timestamp) -%} + {{ return(sqlserver__snapshot_string_as_time(timestamp)) }} +{%- endmacro %} \ No newline at end of file diff --git 
a/dbt/include/synapse/macros/catalog.sql b/dbt/include/synapse/macros/catalog.sql new file mode 100644 index 00000000..7040cf60 --- /dev/null +++ b/dbt/include/synapse/macros/catalog.sql @@ -0,0 +1,4 @@ + +{% macro synapse__get_catalog(information_schemas, schemas) -%} + {{ return(sqlserver__get_catalog(information_schemas, schemas)) }} +{%- endmacro %} diff --git a/dbt/include/sqlserver/macros/indexes.sql b/dbt/include/synapse/macros/indexes.sql similarity index 90% rename from dbt/include/sqlserver/macros/indexes.sql rename to dbt/include/synapse/macros/indexes.sql index d4195e73..a728a4da 100644 --- a/dbt/include/sqlserver/macros/indexes.sql +++ b/dbt/include/synapse/macros/indexes.sql @@ -105,29 +105,10 @@ declare @drop_remaining_indexes_last nvarchar(max) = ( {% macro create_clustered_index(columns, unique=False) -%} - -{{ log("Creating clustered index...") }} - -create -{% if unique -%} -unique -{% endif %} -clustered index - {{ this.table }}__clustered_index_on_{{ columns|join("_") }} - on {{ this }} ({{ '[' + columns|join("], [") + ']' }}) - + {{ return(create_clustered_index(columns, unique=False)) }} {%- endmacro %} {% macro create_nonclustered_index(columns, includes=False) %} - -{{ log("Creating nonclustered index...") }} - -create nonclustered index - {{ this.table }}__index_on_{{ columns|join("_") }} - on {{ this }} ({{ '[' + columns|join("], [") + ']' }}) - {% if includes -%} - include ({{ '[' + includes|join("], [") + ']' }}) - {% endif %} - + {{ return(create_nonclustered_index(columns, includes=False)) }} {% endmacro %} diff --git a/dbt/include/sqlserver/macros/materializations/incremental/incremental.sql b/dbt/include/synapse/macros/materializations/incremental/incremental.sql similarity index 89% rename from dbt/include/sqlserver/macros/materializations/incremental/incremental.sql rename to dbt/include/synapse/macros/materializations/incremental/incremental.sql index 7852be94..c3c26630 100644 --- a/dbt/include/sqlserver/macros/materializations/incremental/incremental.sql +++ b/dbt/include/synapse/macros/materializations/incremental/incremental.sql @@ -1,16 +1,8 @@ {% macro dbt__incremental_delete(target_relation, tmp_relation) -%} - {%- set unique_key = config.require('unique_key') -%} - - delete - from {{ target_relation }} - where ({{ unique_key }}) in ( - select ({{ unique_key }}) - from {{ tmp_relation.include(schema=False, database=False) }} - ); - + {{ return(dbt__incremental_delete(target_relation, tmp_relation)) }} {%- endmacro %} -{% materialization incremental, adapter='sqlserver' -%} +{% materialization incremental, adapter='synapse' -%} {%- set unique_key = config.get('unique_key') -%} {%- set identifier = model['alias'] -%} diff --git a/dbt/include/sqlserver/macros/materializations/seed/seed.sql b/dbt/include/synapse/macros/materializations/seed/seed.sql similarity index 89% rename from dbt/include/sqlserver/macros/materializations/seed/seed.sql rename to dbt/include/synapse/macros/materializations/seed/seed.sql index 357095f6..0c98441b 100644 --- a/dbt/include/sqlserver/macros/materializations/seed/seed.sql +++ b/dbt/include/synapse/macros/materializations/seed/seed.sql @@ -1,6 +1,8 @@ -{% macro sqlserver__basic_load_csv_rows(model, batch_size, agate_table) %} +{% macro synapse__basic_load_csv_rows(model, batch_size, agate_table) %} + {# Synapse does not support the TSQL's normal Table Value Constructor of #} {# INSERT INTO Dest_Table (cols) SELECT cols FROM Ref_Table #} + {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %} {% 
set bindings = [] %} @@ -45,6 +47,6 @@ {{ return(statements[0]) }} {% endmacro %} -{% macro sqlserver__load_csv_rows(model, agate_table) %} - {{ return(sqlserver__basic_load_csv_rows(model, 200, agate_table) )}} +{% macro synapse__load_csv_rows(model, agate_table) %} + {{ return(synapse__basic_load_csv_rows(model, 200, agate_table) )}} {% endmacro %} \ No newline at end of file diff --git a/dbt/include/sqlserver/macros/materializations/seed/str_replace.sql b/dbt/include/synapse/macros/materializations/seed/str_replace.sql similarity index 100% rename from dbt/include/sqlserver/macros/materializations/seed/str_replace.sql rename to dbt/include/synapse/macros/materializations/seed/str_replace.sql diff --git a/dbt/include/sqlserver/macros/materializations/snapshot/snapshot.sql b/dbt/include/synapse/macros/materializations/snapshot/snapshot.sql similarity index 89% rename from dbt/include/sqlserver/macros/materializations/snapshot/snapshot.sql rename to dbt/include/synapse/macros/materializations/snapshot/snapshot.sql index 5cbace9f..6b6be521 100644 --- a/dbt/include/sqlserver/macros/materializations/snapshot/snapshot.sql +++ b/dbt/include/synapse/macros/materializations/snapshot/snapshot.sql @@ -1,10 +1,9 @@ -{% macro sqlserver__post_snapshot(staging_relation) %} - -- Clean up the snapshot temp table - {% do drop_relation(staging_relation) %} +{% macro synapse__post_snapshot(staging_relation) %} + {{ return(sqlserver__post_snapshot(staging_relation)) }} {% endmacro %} -{% macro sqlserver__build_snapshot_staging_table(strategy, sql, target_relation) %} +{% macro synapse__build_snapshot_staging_table(strategy, sql, target_relation) %} {% set tmp_relation = default__make_temp_relation(target_relation, "pseudotmp") %} {% set select = snapshot_staging_table(strategy, sql, target_relation) %} @@ -16,7 +15,7 @@ {% do return(tmp_relation) %} {% endmacro %} -{% materialization snapshot, adapter='sqlserver' %} +{% materialization snapshot, adapter='synapse' %} {%- set config = model['config'] -%} {%- set target_table = model.get('alias', model.get('name')) -%} @@ -55,7 +54,7 @@ {{ adapter.valid_snapshot_target(target_relation) }} - {% set staging_table = sqlserver__build_snapshot_staging_table(strategy, sql, target_relation) %} + {% set staging_table = synapse__build_snapshot_staging_table(strategy, sql, target_relation) %} -- this may no-op if the database does not require column expansion {% do adapter.expand_target_column_types(from_relation=staging_table, @@ -81,7 +80,7 @@ {% do quoted_source_columns.append(adapter.quote(column.name)) %} {% endfor %} - {% set final_sql = sqlserver__snapshot_merge_sql( + {% set final_sql = synapse__snapshot_merge_sql( target = target_relation, source = staging_table, insert_cols = quoted_source_columns diff --git a/dbt/include/sqlserver/macros/materializations/snapshot/snapshot_merge.sql b/dbt/include/synapse/macros/materializations/snapshot/snapshot_merge.sql similarity index 90% rename from dbt/include/sqlserver/macros/materializations/snapshot/snapshot_merge.sql rename to dbt/include/synapse/macros/materializations/snapshot/snapshot_merge.sql index 10973b69..de10416b 100644 --- a/dbt/include/sqlserver/macros/materializations/snapshot/snapshot_merge.sql +++ b/dbt/include/synapse/macros/materializations/snapshot/snapshot_merge.sql @@ -1,4 +1,4 @@ -{% macro sqlserver__snapshot_merge_sql(target, source, insert_cols) -%} +{% macro synapse__snapshot_merge_sql(target, source, insert_cols) -%} {%- set insert_cols_csv = insert_cols | join(', ') -%} EXEC(' diff --git 
a/dbt/include/synapse/macros/materializations/snapshot/strategies.sql b/dbt/include/synapse/macros/materializations/snapshot/strategies.sql new file mode 100644 index 00000000..348a0b0e --- /dev/null +++ b/dbt/include/synapse/macros/materializations/snapshot/strategies.sql @@ -0,0 +1,3 @@ +{% macro synapse__snapshot_hash_arguments(args) %} + {{ return(sqlserver__snapshot_hash_arguments(args)) }} +{% endmacro %} diff --git a/pyodbc.Dockerfile b/pyodbc.Dockerfile deleted file mode 100644 index 81144b02..00000000 --- a/pyodbc.Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ - -FROM python:3.7-slim AS base - -ADD requirements.txt ./ - -# Setup dependencies for pyodbc -RUN \ - apt-get update && \ - apt-get install -y curl build-essential unixodbc-dev g++ apt-transport-https && \ - gpg --keyserver hkp://keys.gnupg.net --recv-keys 5072E1F5 - -# install netcat (i.e. `nc` command) -RUN apt install -y netcat - -RUN \ - export ACCEPT_EULA='Y' && \ - # Install pyodbc db drivers for MSSQL - curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \ - curl https://packages.microsoft.com/config/debian/9/prod.list > /etc/apt/sources.list.d/mssql-release.list && \ - apt-get update && \ - apt-get install -y msodbcsql17 odbc-postgresql mssql-tools - -# add sqlcmd to the path -ENV PATH="$PATH:/opt/mssql-tools/bin" - -# Update odbcinst.ini to make sure full path to driver is listed -RUN \ - sed 's/Driver=psql/Driver=\/usr\/lib\/x86_64-linux-gnu\/odbc\/psql/' /etc/odbcinst.ini > /tmp/temp.ini && \ - mv -f /tmp/temp.ini /etc/odbcinst.ini -# Install pip -RUN \ - pip install --upgrade pip && \ - pip install -r requirements.txt && \ - rm requirements.txt -# permission management -RUN \ - chmod +rwx /etc/ssl/openssl.cnf && \ - # change TLS back to version 1 - sed -i 's/TLSv1.2/TLSv1/g' /etc/ssl/openssl.cnf && \ - # allow weak certificates (certificate signed with SHA1) - # by downgrading OpenSSL security level from 2 to 1 - sed -i 's/SECLEVEL=2/SECLEVEL=1/g' /etc/ssl/openssl.cnf - -RUN \ - # Cleanup build dependencies - apt-get remove -y curl apt-transport-https debconf-utils g++ gcc rsync build-essential gnupg2 && \ - apt-get autoremove -y && apt-get autoclean -y \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 3ad15587..de9bf71b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,4 @@ -dbt-core~=0.18.0 -pyodbc>=4.0.27 -azure-identity>=1.4.0 +dbt-sqlserver==0.18.1 black~=20.8b1 pytest-dbt-adapter~=0.3.0 tox==3.2.0 diff --git a/setup.py b/setup.py index cce46c66..0c833fec 100644 --- a/setup.py +++ b/setup.py @@ -14,9 +14,9 @@ # get this from a separate file -def _dbt_sqlserver_version(): +def _dbt_synapse_version(): _version_path = os.path.join( - this_directory, 'dbt', 'adapters', 'sqlserver', '__version__.py' + this_directory, 'dbt', 'adapters', 'synapse', '__version__.py' ) _version_pattern = r'''version\s*=\s*["'](.+)["']''' with open(_version_path) as f: @@ -26,7 +26,7 @@ def _dbt_sqlserver_version(): return match.group(1) -package_version = _dbt_sqlserver_version() +package_version = _dbt_synapse_version() description = """An Azure Synapse adpter plugin for dbt (data build tool)""" dbt_version = '0.18.1' @@ -47,15 +47,16 @@ def _dbt_sqlserver_version(): license="MIT", author=", ".join(authors_list), author_email="swanson.anders@gmail.com", - url="https://github.com/swanderz/dbt-synapse", + url="https://github.com/dbt-msft/dbt-synapse", packages=find_packages(), package_data={ "dbt": [ - "include/sqlserver/dbt_project.yml", - 
"include/sqlserver/macros/*.sql", - "include/sqlserver/macros/**/*.sql", - "include/sqlserver/macros/**/**/*.sql", + "include/synapse/dbt_project.yml", + "include/synapse/macros/*.sql", + "include/synapse/macros/**/*.sql", ] }, - install_requires=["dbt-core~=0.18.1", "pyodbc>=4.0.27", "azure-identity>=1.4.0"], -) \ No newline at end of file + install_requires=[ + "dbt-sqlserver==0.18.1", + ], +) diff --git a/test/integration/azuresql.dbtspec b/test/integration/azuresql.dbtspec deleted file mode 100644 index ed464781..00000000 --- a/test/integration/azuresql.dbtspec +++ /dev/null @@ -1,23 +0,0 @@ - -target: - type: sqlserver - driver: "ODBC Driver 17 for SQL Server" - port: 1433 - host: "{{ env_var('DBT_AZURESQL_SERVER') }}" - database: "{{ env_var('DBT_AZURESQL_DB') }}" - username: "{{ env_var('DBT_AZURESQL_UID') }}" - password: "{{ env_var('DBT_AZURESQL_PWD') }}" - schema: "dbt_test_azure_sql_{{ var('_dbt_random_suffix') }}" - encrypt: yes - trust_cert: yes - threads: 1 -sequences: - test_dbt_empty: empty - test_dbt_base: base - test_dbt_ephemeral: ephemeral - test_dbt_incremental: incremental - test_dbt_snapshot_strategy_timestamp: snapshot_strategy_timestamp - # test_dbt_snapshot_strategy_check_cols: snapshot_strategy_check_cols - test_dbt_data_test: data_test - test_dbt_schema_test: schema_test - # test_dbt_ephemeral_data_tests: data_test_ephemeral_models diff --git a/test/integration/sqlserver.dbtspec b/test/integration/synapse.dbtspec similarity index 97% rename from test/integration/sqlserver.dbtspec rename to test/integration/synapse.dbtspec index f9ca8c22..38a1ffdd 100644 --- a/test/integration/sqlserver.dbtspec +++ b/test/integration/synapse.dbtspec @@ -1,5 +1,5 @@ target: - type: sqlserver + type: synapse driver: "ODBC Driver 17 for SQL Server" schema: "dbt_test_{{ var('_dbt_random_suffix') }}" host: "{{ env_var('DBT_SYNAPSE_SERVER') }}" diff --git a/tox.ini b/tox.ini index 102a4e0d..8a2cbb6d 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ envlist = unit, flake8, integration-synapse [testenv:integration-synapse] basepython = python3 -commands = /bin/bash -c '{envpython} -m pytest -v test/integration/sqlserver.dbtspec' +commands = /bin/bash -c '{envpython} -m pytest -v test/integration/synapse.dbtspec' passenv = DBT_SYNAPSE_DB DBT_SYNAPSE_PORT DBT_SYNAPSE_PWD DBT_SYNAPSE_SERVER DBT_SYNAPSE_UID deps = -r{toxinidir}/requirements.txt