From 847688c33af043563323b41290478eb486f253f1 Mon Sep 17 00:00:00 2001 From: Matthew Durand Date: Mon, 25 Mar 2024 15:23:58 -0700 Subject: [PATCH] Add s3 benchmark and deploy pipeline (#66) * rename remove_blobfs_files role - remove both blobfs and kvfs files * Rename remove_blobfs role and task name * rename format_blobfs role * rename format_blobfs subtasks * add log dir var * Refactor for KVFS - put the format script in conf dir, not bin dir - put the log file in log dir, not bin dir - use abs paths for everything * KVFS enhancements - stat for dss_formatter and set var - rename substasks * rename template and refactor for kvfs * handle both blobfs and kvfs * handle legacy and new conf file names * Only parse log for errors if blobfs * save dss_target.out to log dir, not bin dir * update path of dss_target.py.out * update dss_target.py.out path * remove reference to unused gcc var for datamover * don't deploy GCC when deploying client * remove gcc from datamover * Don't deploy GCC * remove gcc defaults * Stat dss_formatter and install gcc if not present * Optionally call gcc setenv only if needed * revise start target - use abs path of target script - put target script in conf dir, not bin dir * fix gcc condition with dss_formatter - incorrectly used inverse logic * put minio scripts in conf dir, not bin dir * add format disks err file * don't run compaction if dss_formatter is present * Revise format logic - blobfs logfile now created from template script - kvfs format logfile created after format task - if kvfs format fails - no logfile - missing logfile used for criteria for future format decision - this accommodates dss_formatter returns non-0 on failed format * Cleanup start_dss_host - put script in /etc/dss not bin dir - output logs to /var/log/dss - don't needlessly loop through dss_host.py tasks * Output logs to /var/log/dss * execute spdk setup script by abs path - don't output log if empty * remove correct paths during format * execute spdk script 
directly, not dss_Target.py reset * don't run compaction if dss_formatter * remove chdir args * allow dss_target_config.sh script to be called anywhere - no chdir needed * set compaction default to 'no' - revert compaction string on false condition * remove target conf dir on uninstall * Only check blobfs format log if the format file output exists * Don't check if file exists, we must assume it exists if we are runnign this check * Add deploy stage to dss-ansible --- .gitignore | 7 +- .gitlab-ci.yml | 24 ++--- .gitlab/ansible.yml | 46 +++++++++ .gitlab/build.yml | 47 +++++++++ .gitlab/defaults.yml | 10 ++ .gitlab/deploy.yml | 27 +++++ .gitlab/lint.yml | 8 ++ README.md | 37 ++++--- group_vars/all.yml | 13 ++- playbooks/deploy_client.yml | 1 - playbooks/deploy_dss_software.yml | 1 - playbooks/start_datamover.yml | 1 - playbooks/test_nkv_test_cli.yml | 32 ++++-- roles/cleanup_dss_minio/defaults/main.yml | 1 + .../tasks/execute_cleanup_script.yml | 9 +- roles/create_datamover_conf/defaults/main.yml | 7 +- roles/deploy_dss_target/tasks/main.yml | 26 +++++ roles/deploy_dss_target/vars/main.yml | 6 ++ .../templates/blobfs.mkfs.in.conf.j2 | 37 ------- roles/format_disks/tasks/main.yml | 4 +- .../defaults/main.yml | 2 +- .../format_kvfs/tasks/check_blobfs_format.yml | 38 +++++++ .../tasks/format_kvfs.yml} | 82 +++++++-------- .../tasks/main.yml | 21 ++-- .../templates/format_disks.sh.j2 | 9 +- .../templates/kvfs.mkfs.in.conf.j2 | 7 ++ roles/format_kvfs/vars/main.yml | 3 + roles/remove_dss_software/defaults/main.yml | 17 ++++ roles/remove_dss_software/tasks/main.yml | 11 +++ .../defaults/main.yml | 1 + .../tasks/main.yml | 16 +-- roles/reset_spdk/tasks/main.yml | 4 +- roles/start_compaction/tasks/main.yml | 35 ++----- .../tasks/start_compaction.yml | 26 +++++ roles/start_datamover/defaults/main.yml | 6 +- roles/start_datamover/tasks/datamover_DEL.yml | 3 - roles/start_datamover/tasks/datamover_GET.yml | 3 - .../start_datamover/tasks/datamover_LIST.yml | 3 - 
roles/start_datamover/tasks/datamover_PUT.yml | 3 - .../start_datamover/tasks/datamover_TEST.yml | 3 - roles/start_datamover/tasks/main.yml | 13 ++- roles/start_dss_host/defaults/main.yml | 1 + .../tasks/check_and_configure_driver.yml | 5 + .../start_dss_host/tasks/configure_driver.yml | 78 +++++++-------- roles/start_dss_host/tasks/main.yml | 4 +- .../tasks/setup_host_clusters.yml | 27 +++-- roles/start_dss_minio/defaults/main.yml | 1 + .../tasks/generate_scripts.yml | 2 +- .../tasks/start_minio_clusters.yml | 4 +- roles/start_dss_target/tasks/dss_target.yml | 14 +-- roles/start_dss_target/tasks/main.yml | 25 ++--- .../tasks/collect_target_scripts.yml | 7 +- roles/test_nkv_test_cli/defaults/main.yml | 13 ++- roles/test_nkv_test_cli/tasks/case.yml | 18 ++++ roles/test_nkv_test_cli/tasks/main.yml | 68 +++++-------- .../test_nkv_test_cli/tasks/nkv_test_cli.yml | 98 +++++++++++++----- roles/test_nkv_test_cli/tasks/smoke.yml | 52 ++++++++++ roles/test_nkv_test_cli/tasks/suite.yml | 26 +++++ roles/test_nkv_test_cli/vars/main.yml | 9 ++ roles/test_nkv_test_cli/vars/suite001.yml | 99 +++++++++++++++++++ roles/test_nkv_test_cli/vars/suite002.yml | 26 +++++ roles/test_nkv_test_cli/vars/suite003.yml | 87 ++++++++++++++++ roles/test_s3_benchmark/defaults/main.yml | 1 + .../test_s3_benchmark/tasks/s3_benchmark.yml | 1 + scripts/dependencies/install.sh | 28 ++++++ scripts/dependencies/os/common.sh | 28 ++++++ scripts/dependencies/os/rocky.sh | 50 ++++++++++ scripts/dependencies/python/requirements.txt | 7 ++ scripts/docker/rocky8.DOCKERFILE | 6 ++ 69 files changed, 1080 insertions(+), 355 deletions(-) create mode 100644 .gitlab/ansible.yml create mode 100644 .gitlab/build.yml create mode 100644 .gitlab/defaults.yml create mode 100644 .gitlab/deploy.yml create mode 100644 .gitlab/lint.yml create mode 100644 roles/deploy_dss_target/vars/main.yml delete mode 100644 roles/format_blobfs/templates/blobfs.mkfs.in.conf.j2 rename roles/{format_blobfs => format_kvfs}/defaults/main.yml 
(98%) create mode 100644 roles/format_kvfs/tasks/check_blobfs_format.yml rename roles/{format_blobfs/tasks/format_blobfs.yml => format_kvfs/tasks/format_kvfs.yml} (56%) rename roles/{format_blobfs => format_kvfs}/tasks/main.yml (86%) rename roles/{format_blobfs => format_kvfs}/templates/format_disks.sh.j2 (82%) create mode 100644 roles/format_kvfs/templates/kvfs.mkfs.in.conf.j2 create mode 100644 roles/format_kvfs/vars/main.yml rename roles/{remove_blobfs_files => remove_kvfs_files}/defaults/main.yml (98%) rename roles/{remove_blobfs_files => remove_kvfs_files}/tasks/main.yml (86%) create mode 100644 roles/start_compaction/tasks/start_compaction.yml create mode 100644 roles/start_dss_host/tasks/check_and_configure_driver.yml create mode 100644 roles/test_nkv_test_cli/tasks/case.yml create mode 100644 roles/test_nkv_test_cli/tasks/smoke.yml create mode 100644 roles/test_nkv_test_cli/tasks/suite.yml create mode 100644 roles/test_nkv_test_cli/vars/suite001.yml create mode 100644 roles/test_nkv_test_cli/vars/suite002.yml create mode 100644 roles/test_nkv_test_cli/vars/suite003.yml create mode 100755 scripts/dependencies/install.sh create mode 100755 scripts/dependencies/os/common.sh create mode 100644 scripts/dependencies/os/rocky.sh create mode 100644 scripts/dependencies/python/requirements.txt create mode 100644 scripts/docker/rocky8.DOCKERFILE diff --git a/.gitignore b/.gitignore index 1824534f..f8bfa6b4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,8 @@ # Internal testing inventory files inv_* -.vscode/** + +# VS Code config +.vscode + +# JUNIT XML Test Results +*.xml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cccffe8c..641a2d56 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,21 +1,9 @@ -variables: - BRANCH_NAME: $CI_COMMIT_BRANCH - SONAR_BRANCH: -Dsonar.branch.name=$CI_COMMIT_BRANCH - -image: - name: dss-build_$BRANCH_NAME - -workflow: - rules: - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - variables: - BRANCH_NAME: 
$CI_MERGE_REQUEST_TARGET_BRANCH_NAME - - if: $CI_COMMIT_BRANCH == "master" && $CI_PIPELINE_SOURCE == "push" - - if: $CI_COMMIT_BRANCH =~ /^(stable|feature)\/.*/ && $CI_PIPELINE_SOURCE == "push" +include: + - .gitlab/defaults.yml stages: + - build - lint - -ansible-lint: - stage: lint - script: ansible-lint * + - deploy + - test + - sync diff --git a/.gitlab/ansible.yml b/.gitlab/ansible.yml new file mode 100644 index 00000000..8073be3a --- /dev/null +++ b/.gitlab/ansible.yml @@ -0,0 +1,46 @@ +deploy DSS: + stage: deploy + image: + name: $CI_REGISTRY/$ANSIBLE_PROJECT_PATH/$BRANCH_NAME:$DOCKERFILE_NAME + pull_policy: always + environment: + name: $ANSIBLE_INVENTORY + url: $CI_SERVER_URL/dfs/dss/dss-ansible/-/blob/inventory/$ANSIBLE_INVENTORY + before_script: + # Clone ansible repo + - git config --global http.sslVerify false + - git config --global user.name "$CI_USERNAME" + - git config --global user.email "$CI_EMAIL" + - git clone https://$CI_USERNAME:$CI_TOKEN@$CI_SERVER_HOST/$ANSIBLE_PROJECT_PATH.git --branch $ANSIBLE_BRANCH ../dss-ansible + - cd ../dss-ansible + # Get inventory file + - git fetch origin inventory + - git restore --source origin/inventory -- $ANSIBLE_INVENTORY + # Hack to disregard task output from JUNIT callback module + - sed -i -E "s/dump =.+/dump = ''/g" /usr/local/lib/python3.11/site-packages/ansible/plugins/callback/junit.py + script: + - | + ansible-playbook -i $ANSIBLE_INVENTORY playbooks/download_artifacts.yml \ + -e "download_artifacts=true" \ + -e "artifacts_url=$MINIO_HOST_URL/dss-artifacts" \ + -e "artifacts_branch=$BRANCH_NAME" + - ansible-playbook -i $ANSIBLE_INVENTORY playbooks/remove_dss_software.yml + - ansible-playbook -i $ANSIBLE_INVENTORY playbooks/deploy_dss_software.yml + artifacts: + when: always + reports: + junit: "*.xml" + variables: + ANSIBLE_PROJECT_PATH: dfs/dss/dss-ansible + ANSIBLE_BRANCH: MIN-2148-add-deploy-stage + GIT_STRATEGY: none + DOCKERFILE_NAME: rocky8 + ANSIBLE_CONFIG: ../dss-ansible/ansible.cfg + 
ANSIBLE_INVENTORY: inv_$CI_PROJECT_NAME.ini + ANSIBLE_FORCE_COLOR: "true" + JUNIT_OUTPUT_DIR: $CI_PROJECT_DIR + JUNIT_TASK_CLASS: "yes" + JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT: "no" + ANSIBLE_CALLBACK_WHITELIST: junit + rules: + - !reference [.default_rules, merge_and_push] diff --git a/.gitlab/build.yml b/.gitlab/build.yml new file mode 100644 index 00000000..6a67eb18 --- /dev/null +++ b/.gitlab/build.yml @@ -0,0 +1,47 @@ +build docker: + stage: build + image: docker:25.0.3-git + variables: + ANSIBLE_PROJECT_PATH: dfs/dss/dss-ansible + ANSIBLE_BRANCH: MIN-2148-add-deploy-stage + GIT_STRATEGY: none + DOCKERFILE_NAME: rocky8 + DOCKERFILE_PATH: scripts/docker/$DOCKERFILE_NAME.DOCKERFILE + # IMAGE_TAG: $CI_REGISTRY_IMAGE/$BRANCH_NAME:$DOCKERFILE_NAME + IMAGE_TAG: $CI_REGISTRY/$ANSIBLE_PROJECT_PATH/$BRANCH_NAME:$DOCKERFILE_NAME + CACHE_TAG: ${IMAGE_TAG}-cache + before_script: + # Clone dss-ansible repo + - git config --global http.sslVerify false + - git config --global user.name "$CI_USERNAME" + - git config --global user.email "$CI_EMAIL" + - git clone https://$CI_USERNAME:$CI_TOKEN@$CI_SERVER_HOST/$ANSIBLE_PROJECT_PATH.git --branch $ANSIBLE_BRANCH . 
+ # Install certs so buildkit can access Gitlab container registry + - echo "$SSI_ROOTCA_CERT" > /usr/local/share/ca-certificates/SSI-RootCA.crt + - echo "$SSI_ISSUINGCA_CERT" > /usr/local/share/ca-certificates/SSI-ISSUINGCA.crt + - echo "$MSL_ETX_CERT" > /usr/local/share/ca-certificates/msl-etx.samsung.com.crt + - update-ca-certificates --fresh > /dev/null + # Configure buildkitd.toml to use newly-installed certs + - | + cat < /buildkitd.toml + [registry."$CI_REGISTRY"] + ca=["/etc/ssl/certs/ca-certificates.crt"] + EOF + # Initialize buildkit with custom config + - docker buildx create --driver=docker-container --name=buildkit-builder --use --config /buildkitd.toml + # Login to Gitlab container registry + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + script: + - | + docker buildx build \ + --cache-from type=registry,ref=$CACHE_TAG \ + --cache-to type=registry,ref=$CACHE_TAG \ + --push \ + --tag $IMAGE_TAG \ + --file $DOCKERFILE_PATH . \ + --provenance false + rules: + - !reference [.default_rules, merge_and_push] + - if: '$CI_PIPELINE_SOURCE == "parent_pipeline"' + when: always + diff --git a/.gitlab/defaults.yml b/.gitlab/defaults.yml new file mode 100644 index 00000000..e73bf759 --- /dev/null +++ b/.gitlab/defaults.yml @@ -0,0 +1,10 @@ +include: + - project: dfs/dss/dss + ref: master + file: .gitlab/defaults.yml + - project: dfs/dss/dss + ref: master + file: .gitlab/sync-github.yml + - .gitlab/lint.yml + - .gitlab/build.yml + - .gitlab/deploy.yml diff --git a/.gitlab/deploy.yml b/.gitlab/deploy.yml new file mode 100644 index 00000000..4e6c2498 --- /dev/null +++ b/.gitlab/deploy.yml @@ -0,0 +1,27 @@ +include: .gitlab/ansible.yml + +deploy DSS with upstream dss-sdk artifacts: + extends: deploy DSS + stage: deploy + script: + - | + ansible-playbook -i $ANSIBLE_INVENTORY playbooks/download_artifacts.yml \ + -e "download_artifacts=true" \ + -e "artifacts_url=$MINIO_HOST_URL/dss-artifacts" \ + -e "artifacts_branch=$BRANCH_NAME" + - rm -f 
artifacts/nkv-target* + - rm -f artifacts/nkv-sdk-bin* + - cp $CI_PROJECT_DIR/df_out/nkv-target-*.tgz artifacts/ + - cp $CI_PROJECT_DIR/host_out/nkv-sdk-bin-*.tgz artifacts/ + - ansible-playbook -i $ANSIBLE_INVENTORY playbooks/remove_dss_software.yml + - ansible-playbook -i $ANSIBLE_INVENTORY playbooks/deploy_dss_software.yml + - ansible-playbook -i $ANSIBLE_INVENTORY playbooks/test_nkv_test_cli.yml -e nkv_test_cli_test=suite -e nkv_test_cli_suite=suite003 + - ansible-playbook -i $ANSIBLE_INVENTORY playbooks/test_s3_benchmark.yml + needs: + - build docker + - project: dfs/dss/dss-sdk + job: build dss-sdk + ref: $UPSTREAM_REF + artifacts: true + rules: + - if: $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == "dfs/dss/dss-sdk" diff --git a/.gitlab/lint.yml b/.gitlab/lint.yml new file mode 100644 index 00000000..93cb08bf --- /dev/null +++ b/.gitlab/lint.yml @@ -0,0 +1,8 @@ +ansible-lint: + stage: lint + script: ansible-lint * + needs: [] + rules: + - !reference [.default_rules, merge_and_push] + - if: '$CI_PIPELINE_SOURCE == "parent_pipeline"' + when: never diff --git a/README.md b/README.md index 0c6f27e4..bd9615cd 100644 --- a/README.md +++ b/README.md @@ -506,7 +506,7 @@ This playbook will perform the following actions: * Search for errors in all MinIO logs across all [hosts] / [servers] * Search for errors in all target logs across all [targets] / [servers] -#### playbooks/deploy_datamover.yml +#### playbooks/deploy_client.yml Execute this playbook to deploy DSS Client, including datamover, client library, and their dependencies. Artifacts are deployed to hosts under the [clients] group. @@ -539,7 +539,7 @@ This path can be changed by setting the `coredump_dir` var. see: /group_vars/all Execute this playbook to download artifacts from the dss-artifacts S3 bucket. By default, this playbook will download artifacts from the public AWS S3 dss-artifacts bucket (public HTTP URL). 
-The bucket URL can be overridden with the public URL of any S3-compatible bucket (eg: MinIO, DSS). +The bucket can be overridden with the public URL of any S3-compatible bucket (eg: MinIO, DSS) Additionally, the branch name can also be overridden. #### playbooks/format_redeploy_dss_software.yml @@ -656,7 +656,6 @@ Additional datamover vars: * datamover_client_lib - Datamover client library * datamover_logging_path - Path of datamover logs * datamover_logging_level - Datamover logging level -* datamover_gcc_version - Datamover GCC version * datamover_index_data_queue_size - Number of entries in datamover index queue * datamover_awslib_log_debug - Enable or disable AWS lib debugging @@ -776,7 +775,7 @@ iperf can be tuned by configuring the following vars (default values shown): #### playbooks/test_nkv_test_cli.yml -Perform a basic nkv_test_cli test and report observed throughput. +By default, perform a basic nkv_test_cli test and report observed throughput. This playbook will execute a suite of nkv_test_cli tests in order: 1. Put @@ -787,17 +786,29 @@ This playbook will execute a suite of nkv_test_cli tests in order: Upon test completion, throughput is reported for PUT and GET. +Optionally, this playbook can be used to execute a suite of regression test cases. +This can be done by changing the value of `nkv_test_cli_test` from `smoke` to `suite`. +The default set of test cases can be found in `roles/test_nkv_test_cli/vars/suite001.yml`. +You can create additional test suites using this file as a template. +You can specify your custom test suite by setting `nkv_test_cli_suite` to `your-test` (default: `suite001`). + nkv_test_cli can be tuned by configuring the following vars (default values shown): -| Var name | Default | Description | -| ------------------------------ | ------- | --------------------------------------------------------------------------- | -| nkv_test_cli_keysize | 60 | Key size in bytes. 
Max size = 255 | -| nkv_test_cli_valsize | 1048576 | Value size in bytes. Max size = 1048576 | -| nkv_test_cli_threads | 128 | Number of threads | -| nkv_test_cli_objects | 2000 | Number of objects for each thread (total objects = objects x threads) | -| nkv_test_cli_vm_objects | 100 | Number of objects if host is a VM (default reduced due to lower throughput) | -| nkv_test_cli_async_timeout | 600 | Async timeout in seconds (increase for larger dataset, or slow throughput) | -| nkv_test_cli_async_retry_delay | 5 | Async retry delay in seconds | +| Var name | Default | Description | +| ------------------------------ | ------------ | ------------------------------------------------------------------------------------- | +| nkv_test_cli_port | 1030 | Port used by nkv_test_cli to communicate with subsystem | +| nkv_test_cli_prefix | meta/ansible | KV prefix used to write object. Must beging with `meta/` | +| nkv_test_cli_keysize | 60 | Key size in bytes. Max size = 255 | +| nkv_test_cli_valsize | 1048576 | Value size in bytes. Max size = 1048576 | +| nkv_test_cli_threads | 128 | Number of threads | +| nkv_test_cli_objects | 2000 | Number of objects for each thread (total objects = objects x threads) | +| nkv_test_cli_async_timeout | 600 | Async timeout in seconds (increase for larger dataset, or slow throughput) | +| nkv_test_cli_async_retry_delay | 5 | Async retry delay in seconds | +| nkv_test_cli_test | smoke | Run standard "smoke" test. Change to "suite" to run regression test suite | +| nkv_test_cli_suite | suite001 | Name of test suite to run. 
Corresponds to suite vars in roles/test_nkv_test_cli/vars/ | +| nkv_test_cli_integrity | false | Run nkv_test_cli in data integrity mode | +| nkv_test_cli_mixed_io | false | Run nkv_test_cli with "small meta io before doing a big io" | +| nkv_test_cli_simulate_minio | false | Run nkv_test_cli with "IO pattern similar to MinIO" | #### playbooks/test_ping.yml diff --git a/group_vars/all.yml b/group_vars/all.yml index c3b12665..a264fb1b 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -159,7 +159,6 @@ # datamover_operation: DEL # datamover_operation: TEST # datamover_dryrun: false -# datamover_compaction: true # datamover_prefix: '' # datamover_debug: false # datamover_data_integrity: true @@ -190,10 +189,9 @@ # datamover_client_lib: dss_client # datamover_logging_path: /var/log/dss # datamover_logging_level: INFO -# datamover_gcc_version: 5.1 # datamover_index_data_queue_size: 50000 # datamover_awslib_log_debug: 0 -# datamover_compaction: "yes" +# datamover_compaction: "no" ### NTP defaults # ntp_enabled: true @@ -225,13 +223,20 @@ # iperf_duration: 10 ### nkv_test_cli defaults +# nkv_test_cli_port: 1030 +# nkv_test_cli_prefix: meta/ansible # nkv_test_cli_keysize: 60 # nkv_test_cli_valsize: 1048576 # nkv_test_cli_threads: 128 # nkv_test_cli_objects: 2000 -# nkv_test_cli_vm_objects: 100 # nkv_test_cli_async_timeout: 600 # nkv_test_cli_async_retry_delay: 5 +# nkv_test_cli_test: smoke +# nkv_test_cli_test: suite +# nkv_test_cli_suite: suite001 +# nkv_test_cli_integrity: false +# nkv_test_cli_mixed_io: false +# nkv_test_cli_simulate_minio: false ### EPEL defaults # skip_epel: false diff --git a/playbooks/deploy_client.yml b/playbooks/deploy_client.yml index 0760f269..311e2fac 100644 --- a/playbooks/deploy_client.yml +++ b/playbooks/deploy_client.yml @@ -95,7 +95,6 @@ hosts: clients gather_facts: false roles: - - deploy_gcc - deploy_aws_sdk_cpp - deploy_dss_host - deploy_client_library diff --git a/playbooks/deploy_dss_software.yml 
b/playbooks/deploy_dss_software.yml index 960b5391..da372c5d 100644 --- a/playbooks/deploy_dss_software.yml +++ b/playbooks/deploy_dss_software.yml @@ -101,7 +101,6 @@ - targets gather_facts: false roles: - - deploy_gcc - deploy_dss_target # - name: Deploy etcd Gateway diff --git a/playbooks/start_datamover.yml b/playbooks/start_datamover.yml index 25c9f4ca..4c44f974 100644 --- a/playbooks/start_datamover.yml +++ b/playbooks/start_datamover.yml @@ -91,7 +91,6 @@ # * datamover_client_lib - Datamover client library # * datamover_logging_path - Path of datamover logs # * datamover_logging_level - Datamover logging level -# * datamover_gcc_version - Datamover GCC version # * datamover_index_data_queue_size - Number of entries in datamover index queue # * datamover_awslib_log_debug - Enable or disable AWS lib debugging # diff --git a/playbooks/test_nkv_test_cli.yml b/playbooks/test_nkv_test_cli.yml index 828c0280..e38559d6 100644 --- a/playbooks/test_nkv_test_cli.yml +++ b/playbooks/test_nkv_test_cli.yml @@ -33,7 +33,7 @@ # # #### playbooks/test_nkv_test_cli.yml # -# Perform a basic nkv_test_cli test and report observed throughput. +# By default, perform a basic nkv_test_cli test and report observed throughput. # This playbook will execute a suite of nkv_test_cli tests in order: # # 1. Put @@ -44,17 +44,29 @@ # # Upon test completion, throughput is reported for PUT and GET. # +# Optionally, this playbook can be used to execute a suite of regression test cases. +# This can be done by changing the value of `nkv_test_cli_test` from `smoke` to `suite`. +# The default set of test cases can be found in `roles/test_nkv_test_cli/vars/suite001.yml`. +# You can create additional test suites using this file as a template. +# You can specify your custom test suite by setting `nkv_test_cli_suite` to `your-test` (default: `suite001`). 
+# # nkv_test_cli can be tuned by configuring the following vars (default values shown): # -# | Var name | Default | Description | -# | ------------------------------ | ------- | --------------------------------------------------------------------------- | -# | nkv_test_cli_keysize | 60 | Key size in bytes. Max size = 255 | -# | nkv_test_cli_valsize | 1048576 | Value size in bytes. Max size = 1048576 | -# | nkv_test_cli_threads | 128 | Number of threads | -# | nkv_test_cli_objects | 2000 | Number of objects for each thread (total objects = objects x threads) | -# | nkv_test_cli_vm_objects | 100 | Number of objects if host is a VM (default reduced due to lower throughput) | -# | nkv_test_cli_async_timeout | 600 | Async timeout in seconds (increase for larger dataset, or slow throughput) | -# | nkv_test_cli_async_retry_delay | 5 | Async retry delay in seconds | +# | Var name | Default | Description | +# | ------------------------------ | ------------ | ------------------------------------------------------------------------------------- | +# | nkv_test_cli_port | 1030 | Port used by nkv_test_cli to communicate with subsystem | +# | nkv_test_cli_prefix | meta/ansible | KV prefix used to write object. Must beging with `meta/` | +# | nkv_test_cli_keysize | 60 | Key size in bytes. Max size = 255 | +# | nkv_test_cli_valsize | 1048576 | Value size in bytes. Max size = 1048576 | +# | nkv_test_cli_threads | 128 | Number of threads | +# | nkv_test_cli_objects | 2000 | Number of objects for each thread (total objects = objects x threads) | +# | nkv_test_cli_async_timeout | 600 | Async timeout in seconds (increase for larger dataset, or slow throughput) | +# | nkv_test_cli_async_retry_delay | 5 | Async retry delay in seconds | +# | nkv_test_cli_test | smoke | Run standard "smoke" test. Change to "suite" to run regression test suite | +# | nkv_test_cli_suite | suite001 | Name of test suite to run. 
Corresponds to suite vars in roles/test_nkv_test_cli/vars/ | +# | nkv_test_cli_integrity | false | Run nkv_test_cli in data integrity mode | +# | nkv_test_cli_mixed_io | false | Run nkv_test_cli with "small meta io before doing a big io" | +# | nkv_test_cli_simulate_minio | false | Run nkv_test_cli with "IO pattern similar to MinIO" | - name: Validate ansible versions and dependencies hosts: localhost diff --git a/roles/cleanup_dss_minio/defaults/main.yml b/roles/cleanup_dss_minio/defaults/main.yml index 27f21505..b36cc8bd 100644 --- a/roles/cleanup_dss_minio/defaults/main.yml +++ b/roles/cleanup_dss_minio/defaults/main.yml @@ -31,6 +31,7 @@ ### Path defaults dss_dir: /usr/dss +dss_log_dir: /var/log/dss nkv_sdk_dir: "{{ dss_dir }}/nkv-sdk" nkv_sdk_bin_dir: "{{ nkv_sdk_dir }}/bin" nkv_sdk_conf_dir: "{{ nkv_sdk_dir }}/conf" diff --git a/roles/cleanup_dss_minio/tasks/execute_cleanup_script.yml b/roles/cleanup_dss_minio/tasks/execute_cleanup_script.yml index 7da2171d..66b6df13 100644 --- a/roles/cleanup_dss_minio/tasks/execute_cleanup_script.yml +++ b/roles/cleanup_dss_minio/tasks/execute_cleanup_script.yml @@ -31,10 +31,8 @@ - name: Get range of mountpoints ansible.builtin.command: >- - grep -oP "mount_point\"\: \"/dev/nvme\K\d+" nkv_config_{{ rocev2_ip }}.json + grep -oP "mount_point\"\: \"/dev/nvme\K\d+" {{ nkv_sdk_conf_dir }}/nkv_config_{{ rocev2_ip }}.json register: mountpoint_range - args: - chdir: "{{ nkv_sdk_conf_dir }}" changed_when: false - name: Set min_mountpoint and max_mountpoint vars @@ -43,8 +41,11 @@ max_mountpoint: "{{ mountpoint_range.stdout_lines[-1] }}" - name: Execute minio_cleanup.sh - ansible.builtin.command: "sh ./minio_cleanup.sh {{ min_mountpoint }} {{ max_mountpoint }} nkv_config_{{ rocev2_ip }}.json" + ansible.builtin.shell: >- + sh {{ nkv_sdk_bin_dir }}/minio_cleanup.sh {{ min_mountpoint }} {{ max_mountpoint }} nkv_config_{{ rocev2_ip }}.json + > {{ dss_log_dir }}/minio_cleanup.log 2>&1 args: + # chdir required to support legacy cleanup 
script chdir: "{{ nkv_sdk_bin_dir }}" become: true run_once: true diff --git a/roles/create_datamover_conf/defaults/main.yml b/roles/create_datamover_conf/defaults/main.yml index a087f602..6be249ed 100644 --- a/roles/create_datamover_conf/defaults/main.yml +++ b/roles/create_datamover_conf/defaults/main.yml @@ -68,12 +68,7 @@ datamover_server_as_prefix: "yes" datamover_bucket: bucket datamover_client_lib: dss_client datamover_logging_level: INFO -datamover_gcc_version: 5.1 datamover_index_data_queue_size: 50000 datamover_awslib_log_debug: 0 datamover_fs_mounted: false -datamover_compaction: "yes" - -### GCC defaults -gcc_setenv: /usr/local/bin/setenv-for-gcc510.sh -gcc_restore: /usr/local/bin/restore-default-paths-gcc510.sh +datamover_compaction: "no" diff --git a/roles/deploy_dss_target/tasks/main.yml b/roles/deploy_dss_target/tasks/main.yml index 685e0392..fb161d28 100644 --- a/roles/deploy_dss_target/tasks/main.yml +++ b/roles/deploy_dss_target/tasks/main.yml @@ -29,6 +29,12 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--- +- name: Set target_dir for gcov + ansible.builtin.set_fact: + original_target_dir: "{{ target_dir }}" + target_dir: "{{ (target_dir.split('/df_out')).0 }}" + when: target_dir is search('df_out') + - name: Install epel-release repo ansible.builtin.include_role: name: deploy_epel @@ -36,6 +42,11 @@ - name: Include vars ansible.builtin.include_vars: "{{ ansible_distribution_file_variety | lower }}_{{ ansible_distribution_major_version }}.yml" +- name: Append additional packages for gcov deploy + ansible.builtin.set_fact: + dss_target_deps: "{{ dss_target_deps + additional_gcov_packages }}" + when: original_target_dir is defined + - name: Install target runtime dependencies ansible.builtin.yum: name: "{{ dss_target_deps }}" @@ -139,3 +150,18 @@ state: directory mode: 0755 become: true + +- name: Restore target_dir for gcov + ansible.builtin.set_fact: + target_dir: "{{ original_target_dir }}" + when: original_target_dir is defined + +- name: Stat dss_formatter + ansible.builtin.stat: + path: "{{ target_dir }}/bin/dss_formatter" + register: dss_formatter + +- name: Deploy GCC + ansible.builtin.include_role: + name: deploy_gcc + when: not dss_formatter.stat.exists diff --git a/roles/deploy_dss_target/vars/main.yml b/roles/deploy_dss_target/vars/main.yml new file mode 100644 index 00000000..0a4078e8 --- /dev/null +++ b/roles/deploy_dss_target/vars/main.yml @@ -0,0 +1,6 @@ +--- + +additional_gcov_packages: + - rdma-core-devel + - cppunit-devel + - libstdc++-devel diff --git a/roles/format_blobfs/templates/blobfs.mkfs.in.conf.j2 b/roles/format_blobfs/templates/blobfs.mkfs.in.conf.j2 deleted file mode 100644 index c30304ae..00000000 --- a/roles/format_blobfs/templates/blobfs.mkfs.in.conf.j2 +++ /dev/null @@ -1,37 +0,0 @@ -# The Clear BSD License -# -# Copyright (c) 2022 Samsung Electronics Co., Ltd. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted (subject to the limitations in the disclaimer -# below) provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of Samsung Electronics Co., Ltd. nor the names of its -# contributors may be used to endorse or promote products derived from this -# software without specific prior written permission. -# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY -# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND -# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT -# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -[Nvme] -{{ item }} - -{% if ansible_virtualization_role == "guest" %} -[Blobfs] - TrimEnabled No -{% endif %} \ No newline at end of file diff --git a/roles/format_disks/tasks/main.yml b/roles/format_disks/tasks/main.yml index 286d4552..272d19c5 100644 --- a/roles/format_disks/tasks/main.yml +++ b/roles/format_disks/tasks/main.yml @@ -29,9 +29,9 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- -- name: Remove BlobFS Files +- name: Remove KV Filesystem files ansible.builtin.include_role: - name: remove_blobfs_files + name: remove_kvfs_files when: dss_target_mode is search('kv_block') - name: Format KVSSD diff --git a/roles/format_blobfs/defaults/main.yml b/roles/format_kvfs/defaults/main.yml similarity index 98% rename from roles/format_blobfs/defaults/main.yml rename to roles/format_kvfs/defaults/main.yml index 2d5ef797..76565c9e 100644 --- a/roles/format_blobfs/defaults/main.yml +++ b/roles/format_kvfs/defaults/main.yml @@ -36,7 +36,7 @@ target_bin_dir: "{{ target_dir }}/bin" target_conf_dir: /etc/dss target_conf_file: "{{ target_conf_dir }}/nvmf.in.conf" target_lib_dir: "{{ target_dir }}/lib" - +dss_log_dir: /var/log/dss ### GCC defaults gcc_setenv: /usr/local/bin/setenv-for-gcc510.sh diff --git a/roles/format_kvfs/tasks/check_blobfs_format.yml b/roles/format_kvfs/tasks/check_blobfs_format.yml new file mode 100644 index 00000000..e1c92db4 --- /dev/null +++ b/roles/format_kvfs/tasks/check_blobfs_format.yml @@ -0,0 +1,38 @@ +--- + +- name: Initialize blobfs_format_error variable + ansible.builtin.set_fact: + blobfs_format_error: false + +- name: Check output of existing format_disks.sh + ansible.builtin.slurp: + src: "{{ dss_log_dir }}/format_disks.out" + changed_when: false + register: previous_format + +- name: Check for mkfs_blobfs errors + ansible.builtin.set_fact: + blobfs_format_error: true + when: format_disks_stdout is search(item) + loop: + - Failed to initialize filesystem + - io_device bdev_nvme_poll_groups not unregistered + - bdev modules init 
failed + - Init subsystem bdev failed + - nvme_identify_controller failed + - Failed to initialize SSD + - error while loading shared libraries + - core dumped + - Illegal instruction + - error while loading shared libraries + vars: + format_disks_stdout: "{{ previous_format.content | b64decode }}" + +- name: Check filesystem initialized + ansible.builtin.set_fact: + blobfs_format_error: false + vars: + format_disks_stdout: "{{ previous_format.content | b64decode }}" + when: + - not blobfs_format_error + - format_disks_stdout is search('Initializing filesystem on bdev') diff --git a/roles/format_blobfs/tasks/format_blobfs.yml b/roles/format_kvfs/tasks/format_kvfs.yml similarity index 56% rename from roles/format_blobfs/tasks/format_blobfs.yml rename to roles/format_kvfs/tasks/format_kvfs.yml index 21e5a04e..c82b4900 100644 --- a/roles/format_blobfs/tasks/format_blobfs.yml +++ b/roles/format_kvfs/tasks/format_kvfs.yml @@ -42,64 +42,64 @@ - name: Create format_disks.sh script ansible.builtin.template: src: format_disks.sh.j2 - dest: "{{ target_bin_dir }}/format_disks.sh" + dest: "{{ target_conf_dir }}/format_disks.sh" mode: 0751 + vars: + format_bin: "{% if dss_formatter.stat.exists %}dss_formatter{% else %}mkfs_blobfs{% endif %}" register: create_format_disks_template become: true - name: Check presence of existing format_disks.sh output ansible.builtin.stat: - path: "{{ target_bin_dir }}/format_disks.out" + path: "{{ dss_log_dir }}/format_disks.out" register: format_disks_out_file -- name: Check output of existing format_disks.sh - ansible.builtin.command: "cat {{ target_bin_dir }}/format_disks.out" - changed_when: false - when: format_disks_out_file.stat.exists - register: pre_format_disks +- name: Check previous format_disks.sh output + ansible.builtin.include_tasks: check_blobfs_format.yml + when: + - format_disks_out_file.stat.exists + - not dss_formatter.stat.exists - name: Execute format_disks.sh script - ansible.builtin.shell: ./format_disks.sh > 
format_disks.out 2>&1 + ansible.builtin.command: "{{ target_conf_dir }}//format_disks.sh" register: execute_format_disks - args: - chdir: "{{ target_bin_dir }}" when: > not format_disks_out_file.stat.exists or - pre_format_disks.stdout is search('Failed to initialize filesystem') or - pre_format_disks.stdout is search('io_device bdev_nvme_poll_groups not unregistered') or - pre_format_disks.stdout is search('bdev modules init failed') or - pre_format_disks.stdout is search('Init subsystem bdev failed') or - pre_format_disks.stdout is search('nvme_identify_controller failed') or - pre_format_disks.stdout is search('Failed to initialize SSD') or - pre_format_disks.stdout is search('error while loading shared libraries') or - pre_format_disks.stdout is search('core dumped') or - pre_format_disks.stdout is search('Illegal instruction') or - pre_format_disks.stdout is search('error while loading shared libraries') or - pre_format_disks.stdout is not search('Initializing filesystem on bdev') or + blobfs_format_error | d(false) or create_format_disks_template.changed or - create_blobfs_conf.changed + create_kvfs_conf.changed become: true -- name: Read output of format_disks.sh - ansible.builtin.command: "cat {{ target_bin_dir }}/format_disks.out" - changed_when: false - when: execute_format_disks is not skipped - register: format_disks +- name: Write format_disks.sh output + ansible.builtin.copy: + dest: "{{ dss_log_dir }}/format_disks.{{ item.suffix }}" + content: "{{ execute_format_disks[item.type] }}" + mode: 0644 + loop: + - suffix: out + type: stdout + - suffix: err + type: stderr + loop_control: + label: "{{ dss_log_dir }}/format_disks.{{ item.suffix }}" + when: + - dss_formatter.stat.exists + - execute_format_disks is not skipped + - execute_format_disks[item.type] != '' + become: true + +- name: Check previous format_disks.sh output + ansible.builtin.include_tasks: check_blobfs_format.yml + when: + - execute_format_disks is not skipped + - not 
dss_formatter.stat.exists - name: Assert filesystem initialized successfully ansible.builtin.assert: - that: format_disks.stdout is not search(item) + that: not blobfs_format_error | d(false) fail_msg: | - *ERROR* {{ item }} - Check {{ target_bin_dir }}/format_disks.out for details. - loop: - - Failed to initialize filesystem - - io_device bdev_nvme_poll_groups not unregistered - - bdev modules init failed - - Init subsystem bdev failed - - nvme_identify_controller failed - - Failed to initialize SSD - - core dumped - - Illegal instruction - - error while loading shared libraries - when: format_disks is not skipped + mkfs_blobfs format failed + Check {{ dss_log_dir }}/format_disks.out for details. + when: + - execute_format_disks is not skipped + - not dss_formatter.stat.exists diff --git a/roles/format_blobfs/tasks/main.yml b/roles/format_kvfs/tasks/main.yml similarity index 86% rename from roles/format_blobfs/tasks/main.yml rename to roles/format_kvfs/tasks/main.yml index a9b8d2ce..ca08b1a2 100644 --- a/roles/format_blobfs/tasks/main.yml +++ b/roles/format_kvfs/tasks/main.yml @@ -30,10 +30,8 @@ --- - name: Get spdk status - ansible.builtin.command: ./setup.sh status + ansible.builtin.command: "{{ target_dir }}/scripts/setup.sh status" changed_when: false - args: - chdir: "{{ target_dir }}/scripts" register: spdk_status become: true @@ -44,6 +42,11 @@ spdk_status.stdout is search('vfio-pci') msg: Cannot format BlobFS. Disks are not in SPDK mode. Start DSS software first. +- name: Stat dss_formatter + ansible.builtin.stat: + path: "{{ target_dir }}/bin/dss_formatter" + register: dss_formatter + - name: Stat nvmf.in.conf ansible.builtin.stat: path: "{{ target_conf_file }}" @@ -70,17 +73,17 @@ Execute 'stop_reset_dss_software' playbook to put disks back into kernel mode. Then verify desired SSD firmware version. 
-- name: Create blobfs scripts for each device +- name: Create KV Filesystem scripts for each device ansible.builtin.template: - src: blobfs.mkfs.in.conf.j2 - dest: "{{ target_conf_dir }}/blobfs.mkfs.in.{{ ansible_loop.index0 }}.conf" + src: kvfs.mkfs.in.conf.j2 + dest: "{{ target_conf_dir }}/{{ format_type }}-{{ ansible_loop.index0 }}.conf" mode: 0644 loop: "{{ pcie_addrs.stdout_lines }}" loop_control: extended: true - register: create_blobfs_conf + register: create_kvfs_conf become: true -- name: Create BlobFS filesystem for KV-Block - ansible.builtin.include_tasks: format_blobfs.yml +- name: Create filesystem for KV-Block + ansible.builtin.include_tasks: format_kvfs.yml when: dss_target_mode is search('kv_block') diff --git a/roles/format_blobfs/templates/format_disks.sh.j2 b/roles/format_kvfs/templates/format_disks.sh.j2 similarity index 82% rename from roles/format_blobfs/templates/format_disks.sh.j2 rename to roles/format_kvfs/templates/format_disks.sh.j2 index 02c9782e..67002ea4 100644 --- a/roles/format_blobfs/templates/format_disks.sh.j2 +++ b/roles/format_kvfs/templates/format_disks.sh.j2 @@ -30,15 +30,20 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
set -e - +{ +{% if not dss_formatter.stat.exists %} {% if xrt_setenv_script.stat.exists %} source {{ xrt_setenv }} {% endif %} source {{ gcc_setenv }} +{% endif %} {% for pcie_addr in pcie_addrs.stdout_lines -%} {% set sn_regex = '^.+traddr:[^"]+" "([^"]+)"$' -%} - ./mkfs_blobfs {{ target_conf_dir }}/blobfs.mkfs.in.{{ loop.index0 }}.conf "{{ pcie_addr | regex_replace(sn_regex, '\\1') }}n1" + {{ target_bin_dir}}/{{ format_bin }} {% if dss_formatter.stat.exists %}--config {% endif %}{{ target_conf_dir }}/{{ format_type}}-{{ loop.index0 }}.conf {% if dss_formatter.stat.exists %}--dev_name {% endif %}"{{ pcie_addr | regex_replace(sn_regex, '\\1') }}n1" {% endfor %} wait +{% if not dss_formatter.stat.exists %} source {{ gcc_restore }} +{% endif %} +} 2>&1 | tee {{ dss_log_dir }}/format_disks.out \ No newline at end of file diff --git a/roles/format_kvfs/templates/kvfs.mkfs.in.conf.j2 b/roles/format_kvfs/templates/kvfs.mkfs.in.conf.j2 new file mode 100644 index 00000000..c79f4084 --- /dev/null +++ b/roles/format_kvfs/templates/kvfs.mkfs.in.conf.j2 @@ -0,0 +1,7 @@ +[Nvme] +{{ item }} + +{% if format_type == "blobfs" and ansible_virtualization_role == "guest" %} +[Blobfs] + TrimEnabled No +{% endif %} \ No newline at end of file diff --git a/roles/format_kvfs/vars/main.yml b/roles/format_kvfs/vars/main.yml new file mode 100644 index 00000000..285c546c --- /dev/null +++ b/roles/format_kvfs/vars/main.yml @@ -0,0 +1,3 @@ +--- + +format_type: "{% if dss_formatter.stat.exists %}kvfs{% else %}blobfs{% endif %}" diff --git a/roles/remove_dss_software/defaults/main.yml b/roles/remove_dss_software/defaults/main.yml index a559e376..b24af4e8 100644 --- a/roles/remove_dss_software/defaults/main.yml +++ b/roles/remove_dss_software/defaults/main.yml @@ -31,10 +31,27 @@ ### Path defaults dss_dir: /usr/dss +client_library_dir: "{{ dss_dir }}/client-library" +datamover_dir: "{{ dss_dir }}/nkv-datamover" +dss_agent_config_file_dir: /etc/nkv-agent +minio_dir: "{{ dss_dir }}/nkv-minio" 
+nkv_agent_dir: /usr/share/nkvagent +nkv_agent_tmp_dir: /tmp/dss +nkv_sdk_dir: "{{ dss_dir }}/nkv-sdk" +target_dir: "{{ dss_dir }}/nkv-target" target_conf_dir: /etc/dss xrt_dir: /opt/xilinx ### Remove DSS defaults remove_dss_software_dir_list: + - "{{ client_library_dir }}" + - "{{ datamover_dir }}" + - "{{ dss_agent_config_file_dir }}" + - "{{ minio_dir }}" + - "{{ nkv_agent_dir }}" + - "{{ nkv_agent_tmp_dir }}" + - "{{ nkv_sdk_dir }}" + - "{{ target_dir }}" - "{{ dss_dir }}" + - "{{ target_conf_dir }}" - "{{ xrt_dir }}" diff --git a/roles/remove_dss_software/tasks/main.yml b/roles/remove_dss_software/tasks/main.yml index a94fc8d4..21508adc 100644 --- a/roles/remove_dss_software/tasks/main.yml +++ b/roles/remove_dss_software/tasks/main.yml @@ -29,6 +29,12 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- +- name: Set target_dir for gcov + ansible.builtin.set_fact: + original_target_dir: "{{ target_dir }}" + target_dir: "{{ (target_dir.split('/df_out')).0 }}" + when: target_dir is search('df_out') + - name: Remove DSS Software ansible.builtin.file: path: "{{ dss_path }}" @@ -37,3 +43,8 @@ loop_control: loop_var: dss_path become: true + +- name: Restore target_dir for gcov + ansible.builtin.set_fact: + target_dir: "{{ original_target_dir }}" + when: original_target_dir is defined diff --git a/roles/remove_blobfs_files/defaults/main.yml b/roles/remove_kvfs_files/defaults/main.yml similarity index 98% rename from roles/remove_blobfs_files/defaults/main.yml rename to roles/remove_kvfs_files/defaults/main.yml index 360e10dd..1ce82ee7 100644 --- a/roles/remove_blobfs_files/defaults/main.yml +++ b/roles/remove_kvfs_files/defaults/main.yml @@ -31,5 +31,6 @@ ### Path defaults dss_dir: /usr/dss +dss_log_dir: /var/log/dss target_conf_dir: /etc/dss target_dir: "{{ dss_dir }}/nkv-target" diff --git a/roles/remove_blobfs_files/tasks/main.yml b/roles/remove_kvfs_files/tasks/main.yml similarity index 86% rename from roles/remove_blobfs_files/tasks/main.yml rename to 
roles/remove_kvfs_files/tasks/main.yml index 496160a1..960672cd 100644 --- a/roles/remove_blobfs_files/tasks/main.yml +++ b/roles/remove_kvfs_files/tasks/main.yml @@ -29,13 +29,16 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- -- name: Find BlobFS config files +- name: Find KV Filesystem config files ansible.builtin.find: paths: "{{ target_conf_dir }}" - patterns: blobfs.mkfs.in*.conf + patterns: + - blobfs.mkfs.in*.conf + - blobfs-*.conf + - kvfs-*.conf register: blobfs_conf_files -- name: Remove BlobFS config files +- name: Remove KV Filesystem config files ansible.builtin.file: path: "{{ item.path }}" state: absent @@ -46,9 +49,10 @@ - name: Remove format_disks.sh files ansible.builtin.file: - path: "{{ target_dir }}/bin/{{ item }}" + path: "{{ item }}" state: absent loop: - - format_disks.sh - - format_disks.out + - "{{ target_conf_dir }}/format_disks.sh" + - "{{ dss_log_dir }}/format_disks.out" + - "{{ dss_log_dir }}/format_disks.err" become: true diff --git a/roles/reset_spdk/tasks/main.yml b/roles/reset_spdk/tasks/main.yml index c0af7a43..3fabdbb0 100644 --- a/roles/reset_spdk/tasks/main.yml +++ b/roles/reset_spdk/tasks/main.yml @@ -35,9 +35,7 @@ register: dss_target_path - name: Move disks back to kernel mode (spdk reset) - ansible.builtin.command: /usr/bin/python2 ./dss_target.py reset - args: - chdir: "{{ target_bin_dir }}" + ansible.builtin.command: "{{ target_dir }}/scripts/setup.sh reset" register: target_reset changed_when: target_reset.stdout is search('uio_pci_generic -> nvme') when: dss_target_path.stat.exists diff --git a/roles/start_compaction/tasks/main.yml b/roles/start_compaction/tasks/main.yml index 12a83eb5..52d6053b 100644 --- a/roles/start_compaction/tasks/main.yml +++ b/roles/start_compaction/tasks/main.yml @@ -29,29 +29,14 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--- -- name: Get Subsystems NQNs - ansible.builtin.include_role: - name: get_subsystem_nqns +- name: Stat dss_formatter + ansible.builtin.stat: + path: "{{ target_dir }}/bin/dss_formatter" + register: dss_formatter -- name: Start compaction - ansible.builtin.command: "{{ target_script_dir }}/dss_rpc.py -s /var/run/spdk.sock rdb_compact -n {{ item }}" - loop: "{{ subsystem_nqns }}" - become: true - when: dss_target_mode is search('kv_block') and not hpos - -- name: Check compaction status - ansible.builtin.command: "{{ target_script_dir }}/dss_rpc.py -s /var/run/spdk.sock rdb_compact --get_status -n {{ item }}" - register: compaction_status - loop: "{{ subsystem_nqns }}" - until: > - compaction_status.rc !=0 or - compaction_status.stdout is search('IDLE') - changed_when: false - failed_when: > - compaction_status.rc !=0 or - (compaction_status.stdout is not search('IDLE') and - compaction_status.stdout is not search('IN PROGRESS')) - retries: "{{ (start_compaction_timeout | int / start_compaction_delay | int) | int }}" - delay: "{{ start_compaction_delay }}" - become: true - when: dss_target_mode is search('kv_block') and not hpos +- name: Start Compaction + ansible.builtin.include_tasks: start_compaction.yml + when: + - not dss_formatter.stat.exists + - dss_target_mode is search('kv_block') + - not hpos diff --git a/roles/start_compaction/tasks/start_compaction.yml b/roles/start_compaction/tasks/start_compaction.yml new file mode 100644 index 00000000..9fe67d32 --- /dev/null +++ b/roles/start_compaction/tasks/start_compaction.yml @@ -0,0 +1,26 @@ +--- + +- name: Get Subsystems NQNs + ansible.builtin.include_role: + name: get_subsystem_nqns + +- name: Start compaction + ansible.builtin.command: "{{ target_script_dir }}/dss_rpc.py -s /var/run/spdk.sock rdb_compact -n {{ item }}" + loop: "{{ subsystem_nqns }}" + become: true + +- name: Check compaction status + ansible.builtin.command: "{{ target_script_dir }}/dss_rpc.py -s /var/run/spdk.sock rdb_compact --get_status 
-n {{ item }}" + register: compaction_status + loop: "{{ subsystem_nqns }}" + until: > + compaction_status.rc !=0 or + compaction_status.stdout is search('IDLE') + changed_when: false + failed_when: > + compaction_status.rc !=0 or + (compaction_status.stdout is not search('IDLE') and + compaction_status.stdout is not search('IN PROGRESS')) + retries: "{{ (start_compaction_timeout | int / start_compaction_delay | int) | int }}" + delay: "{{ start_compaction_delay }}" + become: true diff --git a/roles/start_datamover/defaults/main.yml b/roles/start_datamover/defaults/main.yml index 373d9839..21095722 100644 --- a/roles/start_datamover/defaults/main.yml +++ b/roles/start_datamover/defaults/main.yml @@ -36,12 +36,10 @@ datamover_get_path: "{{ ansible_env.HOME }}/datamover" target_conf_dir: /etc/dss datamover_conf_dir: "{{ target_conf_dir }}/datamover" dss_dir: /usr/dss +target_dir: "{{ dss_dir }}/nkv-target" +target_bin_dir: "{{ target_dir }}/bin" datamover_dir: "{{ dss_dir }}/nkv-datamover" -### GCC defaults -gcc_setenv: /usr/local/bin/setenv-for-gcc510.sh -gcc_restore: /usr/local/bin/restore-default-paths-gcc510.sh - ### Datamover defaults # datamover_operation possible values: ['PUT', 'GET', 'LIST', 'DEL', 'TEST'] datamover_operation: PUT diff --git a/roles/start_datamover/tasks/datamover_DEL.yml b/roles/start_datamover/tasks/datamover_DEL.yml index bad82083..a7e11e56 100644 --- a/roles/start_datamover/tasks/datamover_DEL.yml +++ b/roles/start_datamover/tasks/datamover_DEL.yml @@ -31,14 +31,11 @@ - name: Delete data with datamover ansible.builtin.shell: > - source {{ gcc_setenv }} && python3 {{ datamover_dir }}/master_application.py {{ datamover_operation }} --config {{ datamover_conf_dir }}/config.json {{ compaction_string }} {{ dryrun_string }} {{ prefix_string }} - args: - chdir: "{{ datamover_dir }}" run_once: true register: master_application diff --git a/roles/start_datamover/tasks/datamover_GET.yml b/roles/start_datamover/tasks/datamover_GET.yml index 
d658b24f..ddce8b51 100644 --- a/roles/start_datamover/tasks/datamover_GET.yml +++ b/roles/start_datamover/tasks/datamover_GET.yml @@ -31,14 +31,11 @@ - name: Get data with datamover ansible.builtin.shell: > - source {{ gcc_setenv }} && python3 {{ datamover_dir }}/master_application.py {{ datamover_operation }} --config {{ datamover_conf_dir }}/config.json --dest_path {{ datamover_get_path }} {{ dryrun_string }} {{ prefix_string }} - args: - chdir: "{{ datamover_dir }}" register: master_application run_once: true diff --git a/roles/start_datamover/tasks/datamover_LIST.yml b/roles/start_datamover/tasks/datamover_LIST.yml index 67817517..3dd31cfa 100644 --- a/roles/start_datamover/tasks/datamover_LIST.yml +++ b/roles/start_datamover/tasks/datamover_LIST.yml @@ -31,13 +31,10 @@ - name: List data with datamover ansible.builtin.shell: > - source {{ gcc_setenv }} && python3 {{ datamover_dir }}/master_application.py {{ datamover_operation }} --config {{ datamover_conf_dir }}/config.json --dest_path {{ datamover_list_path }} {{ prefix_string }} - args: - chdir: "{{ datamover_dir }}" run_once: true register: master_application diff --git a/roles/start_datamover/tasks/datamover_PUT.yml b/roles/start_datamover/tasks/datamover_PUT.yml index 3257cd3f..0d28ad7c 100644 --- a/roles/start_datamover/tasks/datamover_PUT.yml +++ b/roles/start_datamover/tasks/datamover_PUT.yml @@ -31,15 +31,12 @@ - name: Upload NFS data with datamover ansible.builtin.shell: > - source {{ gcc_setenv }} && python3 {{ datamover_dir }}/master_application.py {{ datamover_operation }} --config {{ datamover_conf_dir }}/config.json {{ compaction_string }} {{ dryrun_string }} {{ prefix_string }} - args: - chdir: "{{ datamover_dir }}" register: master_application run_once: true until: "'DataMover RESUME operation is required' not in master_application.stdout" diff --git a/roles/start_datamover/tasks/datamover_TEST.yml b/roles/start_datamover/tasks/datamover_TEST.yml index 98d20d48..cd3daaca 100644 --- 
a/roles/start_datamover/tasks/datamover_TEST.yml +++ b/roles/start_datamover/tasks/datamover_TEST.yml @@ -31,7 +31,6 @@ - name: Test Datamover Data Integrity ansible.builtin.shell: > - source {{ gcc_setenv }} && python3 {{ datamover_dir }}/master_application.py {{ datamover_operation }} --config {{ datamover_conf_dir }}/config.json @@ -39,7 +38,5 @@ {{ skip_upload_string }} {{ prefix_string }} --dest_path {{ datamover_get_path }} - args: - chdir: "{{ datamover_dir }}" register: master_application run_once: true diff --git a/roles/start_datamover/tasks/main.yml b/roles/start_datamover/tasks/main.yml index 8736ff77..f88a6e5c 100644 --- a/roles/start_datamover/tasks/main.yml +++ b/roles/start_datamover/tasks/main.yml @@ -46,10 +46,21 @@ ansible.builtin.include_role: name: create_datamover_conf +- name: Stat dss_formatter + ansible.builtin.stat: + path: "{{ target_dir }}/bin/dss_formatter" + register: dss_formatter + - name: Set datamover vars ansible.builtin.set_fact: dryrun_string: "{% if datamover_dryrun | bool %}--dryrun{% else %}{% endif %}" - compaction_string: "{% if datamover_compaction | bool and dss_target_mode is search('kv_block') and not hpos %}--compaction yes{% else %}{% endif %}" + compaction_string: >- + {% if datamover_compaction | bool and + dss_target_mode is search('kv_block') and + not hpos and + not dss_formatter.stat.exists %} + --compaction yes + {% else %}{% endif %} data_integrity_string: "{% if datamover_data_integrity | bool %}--data_integrity{% else %}{% endif %}" skip_upload_string: "{% if datamover_skip_upload | bool %}--skip_upload{% else %}{% endif %}" prefix_string: "{% if datamover_prefix != '' %}--prefix={{ datamover_prefix }}{% else %}{% endif %}" diff --git a/roles/start_dss_host/defaults/main.yml b/roles/start_dss_host/defaults/main.yml index 6388d822..df635844 100644 --- a/roles/start_dss_host/defaults/main.yml +++ b/roles/start_dss_host/defaults/main.yml @@ -35,6 +35,7 @@ rdd_port: 1234 ### Path defaults dss_dir: /usr/dss 
+dss_log_dir: /var/log/dss nkv_sdk_dir: "{{ dss_dir }}/nkv-sdk" nkv_sdk_bin_dir: "{{ nkv_sdk_dir }}/bin" target_conf_dir: /etc/dss diff --git a/roles/start_dss_host/tasks/check_and_configure_driver.yml b/roles/start_dss_host/tasks/check_and_configure_driver.yml new file mode 100644 index 00000000..cf9bd77b --- /dev/null +++ b/roles/start_dss_host/tasks/check_and_configure_driver.yml @@ -0,0 +1,5 @@ +--- + +- name: Configure Driver + ansible.builtin.include_tasks: configure_driver.yml + when: all_subsystems_mounted is not defined diff --git a/roles/start_dss_host/tasks/configure_driver.yml b/roles/start_dss_host/tasks/configure_driver.yml index bad9f966..8a2e28bc 100644 --- a/roles/start_dss_host/tasks/configure_driver.yml +++ b/roles/start_dss_host/tasks/configure_driver.yml @@ -29,51 +29,45 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --- -- name: Run block if subsystems not mounted - block: +- name: "Initialize subsystems_found var #{{ config_num }}" + ansible.builtin.set_fact: + subsystems_found: [] + post_subsystems_found: [] - - name: "Initialize subsystems_found var #{{ config_num }}" - ansible.builtin.set_fact: - subsystems_found: [] - post_subsystems_found: [] +- name: "NVMe list subsystems - pre config_driver #{{ config_num }}" + ansible.builtin.command: nvme list-subsys + environment: + PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin" + changed_when: false + register: nvme_subsys + become: true - - name: "NVMe list subsystems - pre config_driver #{{ config_num }}" - ansible.builtin.command: nvme list-subsys - environment: - PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin" - changed_when: false - register: nvme_subsys - become: true +- name: "Check number of mounted subsystems #{{ config_num }}" + ansible.builtin.set_fact: + subsystems_found: "{{ nvme_subsys.stdout | regex_findall(nvme_re) | length }}" + vars: + nvme_re: 'traddr=[^\s]+\strsvcid=[^\s]+\slive' - - name: "Check number of mounted subsystems #{{ config_num }}" - ansible.builtin.set_fact: - 
subsystems_found: "{{ nvme_subsys.stdout | regex_findall(nvme_re) | length }}" - vars: - nvme_re: 'traddr=[^\s]+\strsvcid=[^\s]+\slive' +- name: "Configure Driver #{{ config_num }}" + ansible.builtin.command: "sh {{ target_conf_dir }}/dss_host_config_host.sh" + when: subsystems_found != combined_expected_num_subsystems + become: true - - name: "Configure Driver #{{ config_num }}" - ansible.builtin.command: sh ./dss_host_config_host.sh - args: - chdir: "{{ nkv_sdk_bin_dir }}" - when: subsystems_found != combined_expected_num_subsystems - become: true +- name: "NVMe list subsystems - post config_driver #{{ config_num }}" + ansible.builtin.command: nvme list-subsys + environment: + PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin" + changed_when: false + register: post_nvme_subsys + become: true - - name: "NVMe list subsystems - post config_driver #{{ config_num }}" - ansible.builtin.command: nvme list-subsys - environment: - PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin" - changed_when: false - register: post_nvme_subsys - become: true +- name: "Post-Check subsystems are mounted #{{ config_num }}" + ansible.builtin.set_fact: + post_subsystems_found: "{{ post_nvme_subsys.stdout | regex_findall(nvme_re) | length }}" + vars: + nvme_re: 'traddr=[^\s]+\strsvcid=[^\s]+\slive' - - name: "Post-Check subsystems are mounted #{{ config_num }}" - ansible.builtin.set_fact: - post_subsystems_found: "{{ post_nvme_subsys.stdout | regex_findall(nvme_re) | length }}" - vars: - nvme_re: 'traddr=[^\s]+\strsvcid=[^\s]+\slive' - - - name: "Check all subsystems are mounted #{{ config_num }}" - ansible.builtin.set_fact: - all_subsystems_mounted: true - when: post_subsystems_found == combined_expected_num_subsystems - when: all_subsystems_mounted is not defined +- name: "Check all subsystems are mounted #{{ config_num }}" + ansible.builtin.set_fact: + all_subsystems_mounted: true + when: post_subsystems_found == combined_expected_num_subsystems diff --git a/roles/start_dss_host/tasks/main.yml 
b/roles/start_dss_host/tasks/main.yml index 9cd3462a..381d3cd4 100644 --- a/roles/start_dss_host/tasks/main.yml +++ b/roles/start_dss_host/tasks/main.yml @@ -70,9 +70,7 @@ label: "{{ item.item }}" - name: Configure Driver - ansible.builtin.command: /usr/bin/python2 ./dss_host.py config_driver - args: - chdir: "{{ nkv_sdk_bin_dir }}" + ansible.builtin.command: "/usr/bin/python2 {{ nkv_sdk_bin_dir }}/dss_host.py config_driver" when: - inventory_hostname in host_hostnames - missing_drivers is defined diff --git a/roles/start_dss_host/tasks/setup_host_clusters.yml b/roles/start_dss_host/tasks/setup_host_clusters.yml index b7583138..798527a7 100644 --- a/roles/start_dss_host/tasks/setup_host_clusters.yml +++ b/roles/start_dss_host/tasks/setup_host_clusters.yml @@ -106,8 +106,18 @@ - name: Create dss_host_config_host.sh script ansible.builtin.copy: - content: > - /usr/bin/python2 ./dss_host.py config_host -a + content: | + #! /usr/bin/env bash + pushd {{ nkv_sdk_bin_dir }} + { + {{ dss_host_py_cmd }} + } 2>&1 | tee {{ dss_log_dir }}/dss_host.py.out + popd + mode: 0755 + dest: "{{ target_conf_dir }}/dss_host_config_host.sh" + vars: + dss_host_py_cmd: >- + /usr/bin/python2 {{ nkv_sdk_bin_dir }}/dss_host.py config_host -a {% for ip in first_vlan_ip_list %}{{ ip }}{% if not loop.last %} {% endif %}{% endfor %} -p {{ start_dss_host_port }} -i {{ start_dss_host_qpairs }} -m {{ start_dss_host_mem_align }} @@ -121,13 +131,11 @@ --rdd_port {{ rdd_port }} --gen2 {% endif %} - mode: 0755 - dest: "{{ nkv_sdk_bin_dir }}/dss_host_config_host.sh" when: inventory_hostname in host_hostnames become: true - name: Configure Driver - ansible.builtin.include_tasks: configure_driver.yml + ansible.builtin.include_tasks: check_and_configure_driver.yml loop: "{{ range(1,4) | list }}" loop_control: loop_var: config_num @@ -143,12 +151,3 @@ Expected number of subsystems: {{ combined_expected_num_subsystems }} Actual number of mounted subsystems: {{ post_subsystems_found }} when: inventory_hostname 
in host_hostnames - -- name: Execute dss_host_config_host.sh script - ansible.builtin.command: sh ./dss_host_config_host.sh - args: - chdir: "{{ nkv_sdk_bin_dir }}" - when: - - subsystems_found is not defined - - inventory_hostname in host_hostnames - become: true diff --git a/roles/start_dss_minio/defaults/main.yml b/roles/start_dss_minio/defaults/main.yml index 03ddcbab..3f43847d 100644 --- a/roles/start_dss_minio/defaults/main.yml +++ b/roles/start_dss_minio/defaults/main.yml @@ -37,6 +37,7 @@ nkv_sdk_bin_dir: "{{ nkv_sdk_dir }}/bin" nkv_sdk_conf_dir: "{{ nkv_sdk_dir }}/conf" nkv_sdk_lib_dir: "{{ nkv_sdk_dir }}/lib" dss_log_dir: /var/log/dss +target_conf_dir: /etc/dss ### Cluster defaults cluster_num: 0 diff --git a/roles/start_dss_minio/tasks/generate_scripts.yml b/roles/start_dss_minio/tasks/generate_scripts.yml index 60d3d814..179ac716 100644 --- a/roles/start_dss_minio/tasks/generate_scripts.yml +++ b/roles/start_dss_minio/tasks/generate_scripts.yml @@ -48,7 +48,7 @@ - name: Create minio scripts ansible.builtin.template: src: minio_startup.sh.j2 - dest: "{{ nkv_sdk_bin_dir }}/minio_startup_{{ tcp_alias }}.sh" + dest: "{{ target_conf_dir }}/minio_startup_{{ tcp_alias }}.sh" mode: 0751 become: true loop: "{{ range(0, adjacent_numa_list | length) | list }}" diff --git a/roles/start_dss_minio/tasks/start_minio_clusters.yml b/roles/start_dss_minio/tasks/start_minio_clusters.yml index 24939358..028f88b4 100644 --- a/roles/start_dss_minio/tasks/start_minio_clusters.yml +++ b/roles/start_dss_minio/tasks/start_minio_clusters.yml @@ -47,10 +47,8 @@ - name: Start minio instances ansible.builtin.shell: > sleep 1 && - nohup ./minio_startup_{{ tcp_alias }}.sh > + nohup {{ target_conf_dir }}/minio_startup_{{ tcp_alias }}.sh > {{ dss_log_dir }}/nkv-minio_{{ tcp_alias }}.log 2>&1 & - args: - chdir: "{{ nkv_sdk_bin_dir }}" loop: "{{ tcp_alias_list }}" loop_control: label: "{{ tcp_alias }}" diff --git a/roles/start_dss_target/tasks/dss_target.yml 
b/roles/start_dss_target/tasks/dss_target.yml index 0ccac06d..edef3eba 100644 --- a/roles/start_dss_target/tasks/dss_target.yml +++ b/roles/start_dss_target/tasks/dss_target.yml @@ -61,7 +61,8 @@ ansible.builtin.copy: content: | export PCI_BLACKLIST="{{ pci_blacklist | d('') }}" - /usr/bin/python2 ./dss_target.py configure \ + pushd {{ target_dir }}/bin/ + /usr/bin/python2 {{ target_dir }}/bin/dss_target.py configure \ --config_file {{ target_conf_file }} \ --ip_addresses {% for ip in rocev2_ip_list %}{{ ip }}{% if not loop.last %} {% endif %}{% endfor %} \ --kv_firmware {{ target_fw_version }} \ @@ -75,8 +76,9 @@ --tcp_alias_list "{{ tcp_alias_list }}" \ --rdd_port {{ rdd_port }} \ --gen2 + popd {% endif %} - dest: "{{ target_dir }}/bin/dss_target_config.sh" + dest: "{{ target_conf_dir }}/dss_target_config.sh" mode: 0755 register: dss_target_config_script become: true @@ -89,9 +91,7 @@ - spdk_status.stdout is search('uio_pci_generic') - name: Configure target with dss_target_config.sh script - ansible.builtin.command: sh ./dss_target_config.sh - args: - chdir: "{{ target_dir }}/bin" + ansible.builtin.command: "sh {{ target_conf_dir }}/dss_target_config.sh" register: dss_target_configure become: true when: > @@ -117,10 +117,10 @@ {{ dss_target_configure.stderr }} when: dss_target_configure.changed -- name: Save output of dss_target.py +- name: Save output of dss_target.py.out ansible.builtin.copy: content: "{{ dss_target_configure.stdout }}" - dest: "{{ target_dir }}/bin/dss_target.out" + dest: "{{ dss_log_dir }}/dss_target.py.out" mode: 0644 become: true when: dss_target_configure.changed diff --git a/roles/start_dss_target/tasks/main.yml b/roles/start_dss_target/tasks/main.yml index fc1949c4..3e314e64 100644 --- a/roles/start_dss_target/tasks/main.yml +++ b/roles/start_dss_target/tasks/main.yml @@ -44,16 +44,14 @@ path: "{{ target_conf_file }}" register: nvmf_in_conf -- name: Stat dss_target.out +- name: Stat dss_target.py.out ansible.builtin.stat: - path: "{{ 
target_dir }}/bin/dss_target.out" + path: "{{ dss_log_dir }}/dss_target.py.out" register: dss_target_out - name: Get spdk status - ansible.builtin.command: ./setup.sh status + ansible.builtin.command: "{{ target_dir }}/scripts/setup.sh status" changed_when: false - args: - chdir: "{{ target_dir }}/scripts" register: spdk_status become: true @@ -74,13 +72,13 @@ Disk serial numbers not found in nvmf.in.conf for firmware {{ target_fw_version }} Please verify that 'target_fw_version' var matches desired firmware. -- name: Format BlobFS +- name: Format KV File System ansible.builtin.include_role: - name: format_blobfs + name: format_kvfs when: dss_target_mode is search('kv_block') -- name: Read dss_target.out - ansible.builtin.command: "cat {{ target_dir }}/bin/dss_target.out" +- name: Read dss_target.py.out + ansible.builtin.command: "cat {{ dss_log_dir }}/dss_target.py.out" become: true changed_when: false register: dss_target_out @@ -123,15 +121,20 @@ that: xrt_setenv_script.stat.exists when: hpos +- name: Stat dss_formatter + ansible.builtin.stat: + path: "{{ target_dir }}/bin/dss_formatter" + register: dss_formatter + - name: Start DSS target process ansible.builtin.shell: > {% if xrt_setenv_script.stat.exists %} source {{ xrt_setenv }} && {% endif %} + {% if not dss_formatter.stat.exists %} source {{ gcc_setenv }} && + {% endif %} {{ nvmf_tgt_cmd }} > {{ dss_log_dir }}/nkv-target.log 2>&1 & - args: - chdir: "{{ target_dir }}/bin" when: nvmf_ps.rc != 0 become: true diff --git a/roles/support_bundle/tasks/collect_target_scripts.yml b/roles/support_bundle/tasks/collect_target_scripts.yml index f11349c1..43fd8e64 100644 --- a/roles/support_bundle/tasks/collect_target_scripts.yml +++ b/roles/support_bundle/tasks/collect_target_scripts.yml @@ -31,8 +31,11 @@ - name: Find target setup script, dss_target.py output, and mkfs_blobfs output ansible.builtin.find: - paths: "{{ target_bin_dir }}" - patterns: 'dss_target_config.sh,dss_target.out,format_disks.sh,format_disks.out' 
+ paths: + - "{{ target_bin_dir }}" + - "{{ target_conf_dir }}" + - "{{ dss_log_dir }}" + patterns: 'dss_target_config.sh,dss_target.py.out,format_disks.sh,format_disks.out' recurse: false register: target_setup_logs diff --git a/roles/test_nkv_test_cli/defaults/main.yml b/roles/test_nkv_test_cli/defaults/main.yml index b23e88f5..47925f20 100644 --- a/roles/test_nkv_test_cli/defaults/main.yml +++ b/roles/test_nkv_test_cli/defaults/main.yml @@ -33,15 +33,24 @@ dss_dir: /usr/dss nkv_sdk_dir: "{{ dss_dir }}/nkv-sdk" nkv_sdk_bin_dir: "{{ nkv_sdk_dir }}/bin" +nkv_sdk_conf_dir: "{{ nkv_sdk_dir }}/conf" +dss_log_dir: /var/log/dss ### nkv_test_cli defaults +nkv_test_cli_port: 1030 +nkv_test_cli_prefix: meta/ansible nkv_test_cli_keysize: 60 nkv_test_cli_valsize: 1048576 nkv_test_cli_threads: 128 -nkv_test_cli_objects: 2000 -nkv_test_cli_vm_objects: 100 +nkv_test_cli_objects_phys: 2000 +nkv_test_cli_vm_objects_vm: 100 nkv_test_cli_async_timeout: 600 nkv_test_cli_async_retry_delay: 5 +nkv_test_cli_test: smoke +nkv_test_cli_suite: suite001 +nkv_test_cli_integrity: false +nkv_test_cli_mixed_io: false +nkv_test_cli_simulate_minio: false ### VLAN defaults rocev2_vlans: diff --git a/roles/test_nkv_test_cli/tasks/case.yml b/roles/test_nkv_test_cli/tasks/case.yml new file mode 100644 index 00000000..01c597aa --- /dev/null +++ b/roles/test_nkv_test_cli/tasks/case.yml @@ -0,0 +1,18 @@ +--- + +- name: "Execute test step: {{ test_case.id }}: {{ test_case.description }}" + ansible.builtin.include_tasks: nkv_test_cli.yml + vars: + operation: "{{ lookup('vars', 'test_nkv_test_cli_' + step) }}" + nkv_test_cli_integrity: "{{ test_case.integrity | d(false) }}" + nkv_test_cli_mixed_io: "{{ test_case.mixed_io | d(false) }}" + nkv_test_cli_simulate_minio: "{{ test_case.simulate_minio | d(false) }}" + nkv_test_cli_keysize: "{{ test_case.keysize | d(suite_keysize) }}" + nkv_test_cli_valsize: "{{ test_case.valsize | d(suite_valsize) }}" + nkv_test_cli_objects: "{{ test_case.objects | 
d(suite_objects) }}" + nkv_test_cli_threads: "{{ test_case.threads | d(suite_threads) }}" + assert_num_keys: "{{ test_case.expected_list_keys | d(test_case.threads | d(suite_threads) | int * test_case.objects | d(suite_objects) | int) }}" + test_case_step: "{{ test_case.id }}: {{ step }}" + loop: "{{ test_case.steps }}" + loop_control: + loop_var: step diff --git a/roles/test_nkv_test_cli/tasks/main.yml b/roles/test_nkv_test_cli/tasks/main.yml index f83f1994..ecf2e7d7 100644 --- a/roles/test_nkv_test_cli/tasks/main.yml +++ b/roles/test_nkv_test_cli/tasks/main.yml @@ -34,56 +34,38 @@ target_hostnames: "{{ (groups['servers'] | d([]) + groups['targets'] | d([])) | unique }}" host_hostnames: "{{ (groups['servers'] | d([]) + groups['hosts'] | d([])) | unique }}" +- name: Set host_id var from inventory + ansible.builtin.set_fact: + host_id: "{{ ansible_loop.index }}" + when: > + item == inventory_hostname + loop: "{{ ansible_play_hosts }}" + loop_control: + extended: true + - name: Get RoCEv2 IP lists and NUMA ansible.builtin.include_tasks: get_ip_numa.yml when: inventory_hostname in host_hostnames -- name: Execute nkv_test_cli put - ansible.builtin.include_tasks: nkv_test_cli.yml - vars: - operation: "{{ test_nkv_test_cli_put }}" - when: inventory_hostname in host_hostnames +- name: Create log dir + ansible.builtin.file: + path: "{{ dss_log_dir }}" + state: directory + mode: 0755 + owner: "{{ ansible_effective_user_id }}" + group: "{{ ansible_effective_group_id }}" + become: true -- name: Set put_throughput +- name: Set nkv_test_cli_objects default - vm ansible.builtin.set_fact: - put_throughput: "{{ combined_throughput }}" - when: inventory_hostname in host_hostnames - -- name: Start Compaction - ansible.builtin.include_role: - name: start_compaction - when: - - inventory_hostname in target_hostnames - - dss_target_mode is search('kv_block') + nkv_test_cli_objects: "{{ nkv_test_cli_objects | d(nkv_test_cli_vm_objects_vm) }}" + when: ansible_virtualization_role == 
'guest' -- name: Execute nkv_test_cli get - ansible.builtin.include_tasks: nkv_test_cli.yml - vars: - operation: "{{ test_nkv_test_cli_get }}" - when: inventory_hostname in host_hostnames - -- name: Set get_throughput +- name: Set nkv_test_cli_objects default - physical ansible.builtin.set_fact: - get_throughput: "{{ combined_throughput }}" - when: inventory_hostname in host_hostnames + nkv_test_cli_objects: "{{ nkv_test_cli_objects | d(nkv_test_cli_objects_phys) }}" + when: ansible_virtualization_role != 'guest' -- name: Execute nkv_test_cli delete - ansible.builtin.include_tasks: nkv_test_cli.yml - vars: - operation: "{{ test_nkv_test_cli_delete }}" - when: inventory_hostname in host_hostnames -- name: Start Compaction - ansible.builtin.include_role: - name: start_compaction - when: - - inventory_hostname in target_hostnames - - dss_target_mode is search('kv_block') - -- name: Print throughput - ansible.builtin.debug: - msg: | - Put throughput: {{ '%0.2f' | format(put_throughput | float) }} GB/s - Get throughput: {{ '%0.2f' | format(get_throughput | float) }} GB/s - run_once: true - when: inventory_hostname in host_hostnames +- name: Execute nkv_test_cli test + ansible.builtin.include_tasks: "{{ nkv_test_cli_test }}.yml" diff --git a/roles/test_nkv_test_cli/tasks/nkv_test_cli.yml b/roles/test_nkv_test_cli/tasks/nkv_test_cli.yml index 8b799d91..7dcc9fb6 100644 --- a/roles/test_nkv_test_cli/tasks/nkv_test_cli.yml +++ b/roles/test_nkv_test_cli/tasks/nkv_test_cli.yml @@ -29,36 +29,47 @@ # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--- -- name: Execute nkv_test_cli command - ansible.builtin.command: "{{ nkv_test_cli_command }}" +- name: "Execute nkv_test_cli command {{ test_case_step | d('') }}" + ansible.builtin.shell: "set -e -o pipefail && {{ nkv_test_cli_command | regex_replace('[\\r\\n\\t]+','') }}" args: chdir: "{{ nkv_sdk_bin_dir }}" + executable: /usr/bin/bash loop: "{{ rocev2_numa_prefix_list }}" loop_control: loop_var: vlan - label: "{{ nkv_test_cli_command }}" + label: "{{ nkv_test_cli_command | regex_replace('[\\r\\n\\t]+','') }}" extended: true vars: nkv_test_cli_command: >- - /usr/bin/python2 {{ nkv_sdk_bin_dir }}/dss_host.py - verify_nkv_cli - --conf nkv_config_{{ vlan.ip }}.json - --addr {{ vlan.ipv4_prefix }} - --numa {{ vlan.numa }} - --workload {{ operation }} - --keysize {{ nkv_test_cli_keysize }} - --valsize {{ nkv_test_cli_valsize }} - --threads {{ nkv_test_cli_threads }} - --numobj - {% if ansible_virtualization_role == 'guest' -%} - {{ nkv_test_cli_vm_objects }} - {% else -%} - {{ nkv_test_cli_objects }} + numactl -N {{ vlan.numa }} -m {{ vlan.numa }} + {{ nkv_sdk_bin_dir }}/nkv_test_cli + -c {{ nkv_sdk_conf_dir }}/nkv_config_{{ vlan.ip }}.json + -i {{ inventory_hostname }} + -p {{ nkv_test_cli_port }} + -b {{ nkv_test_cli_prefix }}/host{{ host_id }}/test{{ ansible_loop.index0 }}/ + -o {{ operation }} + {% if operation != '4' -%} + -t {{ nkv_test_cli_threads }} + -k {{ nkv_test_cli_keysize }} + -v {{ nkv_test_cli_valsize }} + -n {{ nkv_test_cli_objects }} {% endif %} + {% if nkv_test_cli_integrity | bool -%} + -m 1 + {% endif %} + {% if nkv_test_cli_mixed_io | bool -%} + -d 1 + {% endif %} + {% if nkv_test_cli_simulate_minio | bool -%} + -g 1 + {% endif %} + 2>&1 | tee -a {{ dss_log_dir }}/nkv_test_cli-test{{ ansible_loop.index0 }}.log async: "{{ nkv_test_cli_async_timeout }}" poll: 0 register: nkv_test_cli_async become: true + tags: + - skip_ansible_lint - name: Check async nkv_test_cli command ansible.builtin.async_status: @@ -70,7 +81,7 @@ loop: "{{ 
nkv_test_cli_async.results }}" loop_control: loop_var: async_task - label: "{{ async_task.vlan.ipv4_prefix }}" + label: "{{ async_task.vlan.ip }}" become: true - name: Assert async nkv_test_cli completion @@ -81,7 +92,32 @@ loop: "{{ async_results.results }}" loop_control: loop_var: async_result - label: "{{ async_result.cmd | join(' ') }}" + label: "{{ async_result.cmd | regex_replace('[\\r\\n\\t]+','') }}" + +- name: Check nkv_test_cli output for errors + ansible.builtin.debug: + msg: "Checking for errors in nkv_test_cli output..." + loop: "{{ async_results.results }}" + loop_control: + loop_var: async_result + label: "{{ async_result.cmd | regex_replace('[\\r\\n\\t]+','') }}" + failed_when: >- + async_result.stdout is search('Container Path down') or + async_result.stdout is search('In simulated minio mode, number of drives should be at least 4') or + async_result.stdout is search('Lock tuple failed with error') or + async_result.stdout is search('NKV lock KVP call failed') or + async_result.stdout is search('NKV lock operation failed') or + async_result.stdout is search('NKV open failed') or + async_result.stdout is search('NKV RDD chunked Get KVP call failed') or + async_result.stdout is search('NKV RDD Retrieve KVP call failed') or + async_result.stdout is search('NKV Retrieve KVP call failed') or + async_result.stdout is search('NKV Store KVP call failed') or + async_result.stdout is search('NKV Store KVP checksum call failed') or + async_result.stdout is search('NKV store operation failed') or + async_result.stdout is search('Opening path failed') or + async_result.stdout is search('Path open failed') or + async_result.stdout is search('store tuple failed with error') or + async_result.stdout is search('Unsupported operation provided') - name: Initialize throughput vars ansible.builtin.set_fact: @@ -90,14 +126,15 @@ - name: Set throughput ansible.builtin.set_fact: - throughput: "{{ (throughput | float) + (async_result.stdout | regex_search(nkv_re, '\\1') | 
first | float) }}" + throughput: "{{ (throughput | float) + (async_result.stdout | regex_search(nkv_re, '\\1') | first | float / 1000) }}" vars: nkv_re: >- - BW = ([^ ]+) GB/s + Throughput = ([^ ]+) MB/sec loop: "{{ async_results.results }}" loop_control: loop_var: async_result - label: "{{ async_result.cmd | join(' ') }}" + label: "{{ async_result.async_task.vlan.ip }}" + when: async_result.stdout | regex_search(nkv_re, '\\1') - name: Set combined_throughput ansible.builtin.set_fact: @@ -106,3 +143,20 @@ loop_control: loop_var: host run_once: true + +- name: Assert total_num_keys for list operation + ansible.builtin.assert: + that: expected_total_num_keys | int == async_result.stdout | regex_search(nkv_re, '\\1') | first | int + fail_msg: "Expected {{ expected_total_num_keys }} - found {{ async_result.stdout | regex_search(nkv_re, '\\1') | first | int }}" + quiet: true + vars: + nkv_re: >- + total_num_keys = ([^ \r\n]+) + expected_total_num_keys: "{{ assert_num_keys | d(nkv_test_cli_threads | int * nkv_test_cli_objects | int) }}" + loop: "{{ async_results.results }}" + loop_control: + loop_var: async_result + label: "{{ async_result.async_task.vlan.ip }}" + when: + - operation == '4' + - async_result.stdout | regex_search(nkv_re, '\\1') diff --git a/roles/test_nkv_test_cli/tasks/smoke.yml b/roles/test_nkv_test_cli/tasks/smoke.yml new file mode 100644 index 00000000..6bf81921 --- /dev/null +++ b/roles/test_nkv_test_cli/tasks/smoke.yml @@ -0,0 +1,52 @@ +# Smoke test to validate cluster throughput with nkv_test_cli (the test that this play executes by default) +--- + +- name: Execute nkv_test_cli put + include_tasks: nkv_test_cli.yml + vars: + operation: "{{ test_nkv_test_cli_put }}" + when: inventory_hostname in host_hostnames + +- name: Set put_throughput + set_fact: + put_throughput: "{{ combined_throughput }}" + when: inventory_hostname in host_hostnames + +- name: Start Compaction + include_role: + name: start_compaction + when: + - inventory_hostname in 
target_hostnames + - dss_target_mode is search('kv_block') + +- name: Execute nkv_test_cli get + include_tasks: nkv_test_cli.yml + vars: + operation: "{{ test_nkv_test_cli_get }}" + when: inventory_hostname in host_hostnames + +- name: Set get_throughput + set_fact: + get_throughput: "{{ combined_throughput }}" + when: inventory_hostname in host_hostnames + +- name: Execute nkv_test_cli delete + include_tasks: nkv_test_cli.yml + vars: + operation: "{{ test_nkv_test_cli_delete }}" + when: inventory_hostname in host_hostnames + +- name: Start Compaction + include_role: + name: start_compaction + when: + - inventory_hostname in target_hostnames + - dss_target_mode is search('kv_block') + +- name: Print throughput + debug: + msg: | + Put throughput: {{ '%0.2f' | format(put_throughput | float) }} GB/s + Get throughput: {{ '%0.2f' | format(get_throughput | float) }} GB/s + run_once: true + when: inventory_hostname in host_hostnames diff --git a/roles/test_nkv_test_cli/tasks/suite.yml b/roles/test_nkv_test_cli/tasks/suite.yml new file mode 100644 index 00000000..a71289b8 --- /dev/null +++ b/roles/test_nkv_test_cli/tasks/suite.yml @@ -0,0 +1,26 @@ +--- + +- name: Include vars + ansible.builtin.include_vars: "{{ nkv_test_cli_suite }}.yml" + +- name: Set suite_key var + ansible.builtin.set_fact: + suite_key: "{{ nkv_test_cli_prefix }}" + suite_keysize: "{{ nkv_test_cli_keysize }}" + suite_objects: "{{ nkv_test_cli_objects }}" + suite_valsize: "{{ nkv_test_cli_valsize }}" + suite_threads: "{{ nkv_test_cli_threads }}" + suite_prefix: "{{ nkv_test_cli_prefix }}" + +- name: Executing Test suite + debug: + msg: "Test suite: {{ nkv_test_cli_suite }}" + run_once: true + +- name: "Execute test case" + include_tasks: case.yml + loop: "{{ test_cases }}" + vars: + test_case: "{{ item }}" + nkv_test_cli_prefix: "{{ test_case.prefix | d(suite_prefix) }}/{{ nkv_test_cli_suite }}/{{ test_case.id }}" + when: inventory_hostname in host_hostnames diff --git 
a/roles/test_nkv_test_cli/vars/main.yml b/roles/test_nkv_test_cli/vars/main.yml index 1077d5f0..6a7a00f9 100644 --- a/roles/test_nkv_test_cli/vars/main.yml +++ b/roles/test_nkv_test_cli/vars/main.yml @@ -32,3 +32,12 @@ test_nkv_test_cli_put: 0 test_nkv_test_cli_get: 1 test_nkv_test_cli_delete: 2 +test_nkv_test_cli_put_get_delete: 3 +test_nkv_test_cli_list: 4 +test_nkv_test_cli_put_list: 5 +test_nkv_test_cli_lock_unlock: 6 +test_nkv_test_cli_chunked_put: 7 +test_nkv_test_cli_rdd_get: 8 +test_nkv_test_cli_rdd_chunked_get: 9 +test_nkv_test_cli_chunked_del: 10 +test_nkv_test_cli_rdd_put: 11 diff --git a/roles/test_nkv_test_cli/vars/suite001.yml b/roles/test_nkv_test_cli/vars/suite001.yml new file mode 100644 index 00000000..06fcbd4f --- /dev/null +++ b/roles/test_nkv_test_cli/vars/suite001.yml @@ -0,0 +1,99 @@ +--- + +# Default nkv_test_cli values to be used by all tests +nkv_test_cli_keysize: 60 +nkv_test_cli_valsize: 1048576 +nkv_test_cli_threads: 5 +nkv_test_cli_objects: 5 + +# List of test cases to execute. +# Test cases must include: +# - id: ID of test case to run - may be a JIRA ticket # in case of regression/defect +# - description: Plain-English description of test case (objective of the test case) +# - steps: A list of steps to perform. Can be one of the following in any order: +# - put +# - get +# - delete +# - put_get_delete +# - list +# - put_list +# - lock_unlock +# - chunked_put +# - rdd_get +# - rdd_chunked_get +# - chunked_del +# - rdd_put +# Note: +# - list operation will validate the number of objects at the unique key prefix of the test case. +# It is expected to perform "list" after "put" to validate keys exist. +# By default it is asserted that "list" will find a number of keys equaling threads x objects. +# If a different number of keys is expected (eg: list before put, list after delete) you can +# specify the expected num. keys by setting "expected_list_keys" for each test case. 
+# Optional settings for each test case: +# - keysize: key size of each object in bytes (min: 1, max: 1024) +# - valsize: value size of each object in bytes (min: 1, max: 1048576) +# - threads: number of threads +# - objects: number of objects to write (per thread) +# - integrity: use data-integrity test +# - mixed_io: small meta io before doing a big io +# - simulate_minio: generate IO pattern similar to MinIO +# - assert_num_keys: Expected number of keys to find during list operation (default: num. objects x num. threads) + +test_cases: + # - id: MIN-1761 + # description: put single object with 255-byte key + # keysize: 255 + # threads: 1 + # objects: 1 + # steps: + # - put + - id: N001 + description: put and list + steps: + - put + - list + - id: N002 + description: put, delete, list + expected_list_keys: 0 + steps: + - put + - delete + - list + - id: N003 + description: put, get, delete objects with data integrity check + integrity: true + steps: + - put_get_delete + - id: N004 + description: mixed_io testing + mixed_io: true + steps: + - put + - id: N005 + description: simulate minio io testing + simulate_minio: true + steps: + - put + - id: N006a-N007a + description: large key test - put and get objects with 160-byte key + keysize: 160 + steps: + - put + - get + # - id: N006b-N007b + # description: large key test - put and get objects with 255-byte key + # keysize: 255 + # steps: + # - put + # - get + - id: N006c-N007c + description: large key test - put and get objects with 1024-byte key + keysize: 1024 + steps: + - put + - get + - id: N008 + description: chunked put and del + steps: + - chunked_put + - chunked_del diff --git a/roles/test_nkv_test_cli/vars/suite002.yml b/roles/test_nkv_test_cli/vars/suite002.yml new file mode 100644 index 00000000..38d128b9 --- /dev/null +++ b/roles/test_nkv_test_cli/vars/suite002.yml @@ -0,0 +1,26 @@ +--- + +# suite002 - Test suite targeting gen2 specific nkv_test_cli features +# To use - make sure cluster is deployed with 
`gen2=true` + +nkv_test_cli_keysize: 60 +nkv_test_cli_valsize: 1048576 +nkv_test_cli_threads: 32 +nkv_test_cli_objects: 1000 + +test_cases: + - id: G2001 + description: RDD put and get + steps: + - rdd_put + - rdd_get + - id: G2002 + description: Chunked put then RDD chunked get + steps: + - chunked_put + - rdd_chunked_get + - id: G2003 + description: Chunked put then chunked delete + steps: + - chunked_put + - chunked_del diff --git a/roles/test_nkv_test_cli/vars/suite003.yml b/roles/test_nkv_test_cli/vars/suite003.yml new file mode 100644 index 00000000..112f3a94 --- /dev/null +++ b/roles/test_nkv_test_cli/vars/suite003.yml @@ -0,0 +1,87 @@ +--- + +# suite003 - GCOV test suite + +# Default test parameters +nkv_test_cli_keysize: 60 +nkv_test_cli_valsize: 1048576 +nkv_test_cli_threads: 10 +nkv_test_cli_objects: 1000 + +test_cases: + + - id: GCOV001 + description: meta-prefix put and list + prefix: meta/ansible + valsize: 400 + steps: + - put + - list + + - id: GCOV002 + description: meta-prefix put, delete and list + prefix: meta/ansible + valsize: 400 + expected_list_keys: 0 + steps: + - put + - delete + - list + + - id: GCOV003 + description: meta-prefix put and list twice (overwrite) + prefix: meta/ansible + valsize: 400 + steps: + - put + - list + - put + - list + + - id: GCOV004 + description: put and list + expected_list_keys: 0 + prefix: data/ansible + steps: + - put + - list + + - id: GCOV005 + description: put, delete + expected_list_keys: 0 + prefix: data/ansible + steps: + - put + - delete + - list + + - id: GCOV006 + description: put_get_delete with data integrity check + prefix: data/ansible + integrity: true + steps: + - put_get_delete + + - id: GCOV007 + description: put, get, delete mixed_io testing + prefix: data/ansible + mixed_io: true + steps: + - put + - get + - delete + +# BROKEN +# - id: GCOV010 +# description: RDD put and get +# steps: +# - rdd_put +# - rdd_get + +# BROKEN +# - id: GCOV011 +# description: RDD put and get non-meta prefix +# 
prefix: data/ansible +# steps: +# - rdd_put +# - rdd_get diff --git a/roles/test_s3_benchmark/defaults/main.yml b/roles/test_s3_benchmark/defaults/main.yml index 24952a56..db112453 100644 --- a/roles/test_s3_benchmark/defaults/main.yml +++ b/roles/test_s3_benchmark/defaults/main.yml @@ -46,6 +46,7 @@ s3_benchmark_async_retry_delay: 5 s3_benchmark_max_instances_per_client: 0 s3_benchmark_strict_numa: false s3_benchmark_max_value_size: 4194304 +s3_benchmark_dss_lib: 0 ### MinIO defaults minio_port: 9000 diff --git a/roles/test_s3_benchmark/tasks/s3_benchmark.yml b/roles/test_s3_benchmark/tasks/s3_benchmark.yml index a75eae51..a280e529 100644 --- a/roles/test_s3_benchmark/tasks/s3_benchmark.yml +++ b/roles/test_s3_benchmark/tasks/s3_benchmark.yml @@ -67,6 +67,7 @@ -u http://{{ endpoint.endpoint }}:{{ minio_port }} -t {{ s3_benchmark_num_threads }} -z {{ s3_benchmark_object_size }} + -dss_lib {{ s3_benchmark_dss_lib }} -{% if operation != 'GET' %}n{% else %}c{% endif %} {% if ansible_virtualization_role == 'guest' -%} {{ s3_benchmark_num_objects_vm }} diff --git a/scripts/dependencies/install.sh b/scripts/dependencies/install.sh new file mode 100755 index 00000000..928e577f --- /dev/null +++ b/scripts/dependencies/install.sh @@ -0,0 +1,28 @@ +#! /usr/bin/env bash +# shellcheck source=/dev/null +set -e + +# Path variables +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +if [[ -e /etc/os-release ]]; then + source /etc/os-release +else + ID=unknown +fi + +# Default paths in case they are not exported automatically +export PATH=$PATH:/usr/local/bin:/usr/local/sbin + +for id in $ID $ID_LIKE; do + if [[ -e $SCRIPT_DIR/os/$id.sh ]]; then + echo "os: $id" + source "$SCRIPT_DIR/os/$id.sh" + source "$SCRIPT_DIR/os/common.sh" + exit 0 + fi +done + +printf "Non-supported distribution detected: %s\n" "$ID" >&2 +echo "Aborting!" 
+exit 1 diff --git a/scripts/dependencies/os/common.sh b/scripts/dependencies/os/common.sh new file mode 100755 index 00000000..baad5d16 --- /dev/null +++ b/scripts/dependencies/os/common.sh @@ -0,0 +1,28 @@ +#! /usr/bin/env bash +set -e + +# Path variables +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +REQUIREMENTS=$(realpath "$SCRIPT_DIR/../python/requirements.txt") + +# Install python3 pip +python3 -m ensurepip --upgrade + +# Upgrade pip to the latest version +python3 -m pip install pip --upgrade + +# Install python modules from requirements.txt +PIP_ARGS=() +PIP_ARGS+=("-r") +PIP_ARGS+=("$REQUIREMENTS") + +# Optimizations for Docker build +if [[ -f /.dockerenv ]] +then + PIP_ARGS+=("--no-cache-dir") +fi + +# Install python modules from requirements.txt via pip +INSTALL_STRING="python3 -m pip install ${PIP_ARGS[*]}" +echo "executing command: $INSTALL_STRING" +eval "$INSTALL_STRING" diff --git a/scripts/dependencies/os/rocky.sh b/scripts/dependencies/os/rocky.sh new file mode 100644 index 00000000..7207a1cf --- /dev/null +++ b/scripts/dependencies/os/rocky.sh @@ -0,0 +1,50 @@ +#! 
/usr/bin/env bash +set -e + +# Build Dependencies +BUILD_DEPS=() +BUILD_DEPS+=('python3.11') +BUILD_DEPS+=('git') +BUILD_DEPS+=('sshpass') + +# Optimizations for Docker build +if [[ -f /.dockerenv ]] +then + BUILD_DEPS+=('--nodocs') + BUILD_DEPS+=('--noplugins') + BUILD_DEPS+=('--setopt=install_weak_deps=0') +fi + +# Detect package installer +INSTALLER_BIN="" + +if [[ -f '/usr/bin/dnf' ]] +then + echo "using dnf" + INSTALLER_BIN='dnf' +elif [[ -f '/usr/bin/microdnf' ]] +then + echo "using microdnf" + INSTALLER_BIN='microdnf' +else + # Can't find an appropriate installer + echo "can't find a valid installer" + exit 1 +fi + +INSTALL_STRING="$INSTALLER_BIN install -y ${BUILD_DEPS[*]}" +echo "executing command: $INSTALL_STRING" +eval "$INSTALL_STRING" + +# Further cleanup if Docker environment +if [[ -f /.dockerenv ]] +then + CLEANUP_STRING="$INSTALLER_BIN clean all" + echo "executing command: $CLEANUP_STRING" + eval "$CLEANUP_STRING" + rm -rf /var/lib/dnf/history* + rm -rf /var/lib/dnf/repos/* + rm -rf /var/lib/rpm/__db* + rm -rf /usr/share/man /usr/share/doc /usr/share/licenses /tmp/* + rm -f /var/log/dnf* +fi diff --git a/scripts/dependencies/python/requirements.txt b/scripts/dependencies/python/requirements.txt new file mode 100644 index 00000000..a2edae62 --- /dev/null +++ b/scripts/dependencies/python/requirements.txt @@ -0,0 +1,7 @@ +ansible>=2.9,<2.10 +jinja2>=2.8 +jmespath>=0.10.0 +junit_xml>=1.9 +netaddr>=0.8.0 +paramiko>=2.7.1 +wheel diff --git a/scripts/docker/rocky8.DOCKERFILE b/scripts/docker/rocky8.DOCKERFILE new file mode 100644 index 00000000..7a6d89da --- /dev/null +++ b/scripts/docker/rocky8.DOCKERFILE @@ -0,0 +1,6 @@ +FROM rockylinux:8-minimal + +ADD scripts/dependencies /dependencies +RUN set -eux \ + && /dependencies/install.sh \ + && rm -rf /dependencies