diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 482d91a8b..a29ebc222 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ # Global owners of whole repo -* @andrewelamb @GiaJordan @linglp \ No newline at end of file +* @Sage-Bionetworks/schematic-developers diff --git a/.github/workflows/scan_repo.yml b/.github/workflows/scan_repo.yml index 56c7ac35a..582c68f45 100644 --- a/.github/workflows/scan_repo.yml +++ b/.github/workflows/scan_repo.yml @@ -17,7 +17,7 @@ jobs: uses: actions/checkout@v4 - name: Run Trivy vulnerability scanner in repo mode - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: # the scan targets the file system. scan-type: 'fs' diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3fb546bbe..535d7804f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -134,7 +134,16 @@ jobs: SERVICE_ACCOUNT_CREDS: ${{ secrets.SERVICE_ACCOUNT_CREDS }} run: > poetry run pytest --durations=0 --cov-append --cov-report=term --cov-report=html:htmlcov --cov-report=xml:coverage.xml --cov=schematic/ - -m "not (rule_benchmark)" --reruns 4 -n 8 --ignore=tests/unit + -m "not (rule_benchmark or single_process_execution)" --reruns 4 -n 8 --ignore=tests/unit + + - name: Run integration tests single process + if: ${{ contains(fromJSON('["3.10"]'), matrix.python-version) }} + env: + SYNAPSE_ACCESS_TOKEN: ${{ secrets.SYNAPSE_ACCESS_TOKEN }} + SERVICE_ACCOUNT_CREDS: ${{ secrets.SERVICE_ACCOUNT_CREDS }} + run: > + poetry run pytest --durations=0 --cov-append --cov-report=term --cov-report=html:htmlcov --cov-report=xml:coverage.xml --cov=schematic/ + -m "single_process_execution" --reruns 4 --ignore=tests/unit - name: Upload pytest test results diff --git a/.gitignore b/.gitignore index 6d00e45d3..91f6d7c64 100644 --- a/.gitignore +++ b/.gitignore @@ -164,7 +164,7 @@ clean.sh # Intermediate files data/json_schema_logs/json_schema_log.json great_expectations/checkpoints/manifest_checkpoint.yml -great_expectations/expectations/Manifest_test_suite.json +great_expectations/expectations/Manifest_test_suite*.json tests/data/example.MockComponent.schema.json tests/data/mock_manifests/Invalid_Test_Manifest_censored.csv @@ -177,6 +177,7 @@ tests/data/schema.gpickle # Created during testting Example* manifests/* +https:* # schematic config file config.yml \ No newline at end of file diff --git a/.readthedocs.yml b/.readthedocs.yml index a9472a17d..5e86f8ca8 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -7,14 +7,15 @@ version: 2 # Set the version of Python and other tools you might need build: - os: ubuntu-20.04 + os: ubuntu-22.04 tools: python: "3.9" jobs: post_install: - pip install poetry==1.3.0 - poetry config virtualenvs.create false - - poetry install --with doc + - poetry install --all-extras + - pip install typing-extensions==4.12.2 #Poetry will install my dependencies into the virtualenv created by readthedocs if I set virtualenvs.create=false # You can also specify other tool versions: # nodejs: "16" diff --git a/CONTRIBUTION.md b/CONTRIBUTION.md index 930fbea81..4d8646e6c 100644 --- a/CONTRIBUTION.md +++ b/CONTRIBUTION.md @@ -49,7 +49,7 @@ For new features, bugs, enhancements: #### 4. Pull Request and Review * Create a PR from `develop-` into the develop branch of the upstream repo * Request a code review on the PR -* Once code is approved merge in the develop branch. We suggest creating a merge commit for a cleaner commit history on the `develop` branch. 
+* Once code is approved merge in the develop branch. The **"Squash and merge"** strategy should be used for a cleaner commit history on the `develop` branch. The description of the squash commit should include enough information to understand the context of the changes that were made. * Once the actions pass on the main branch, delete the `develop-` branch ### Updating readthedocs documentation diff --git a/README.md b/README.md index 72a30b70f..32318c9a4 100644 --- a/README.md +++ b/README.md @@ -23,9 +23,9 @@ - [2a. Set up your virtual environment with `venv`](#2a-set-up-your-virtual-environment-with-venv) - [2b. Set up your virtual environment with `conda`](#2b-set-up-your-virtual-environment-with-conda) - [3. Install `schematic` dependencies](#3-install-schematic-dependencies) - - [4. Set up configuration files](#4-set-up-configuration-files) - - [5. Get your data model as a `JSON-LD` schema file](#5-get-your-data-model-as-a-json-ld-schema-file) - - [6. Obtain Google credential files](#6-obtain-google-credential-files) + - [4. Get your data model as a `JSON-LD` schema file](#4-get-your-data-model-as-a-json-ld-schema-file) + - [5. Obtain Google credential files](#5-obtain-google-credential-files) + - [6. Set up configuration files](#6-set-up-configuration-files) - [7. Verify your setup](#7-verify-your-setup) - [Installation Guide For: Contributors](#installation-guide-for-contributors) - [1. Clone the `schematic` package repository](#1-clone-the-schematic-package-repository) @@ -33,17 +33,20 @@ - [3. Start the virtual environment](#3-start-the-virtual-environment) - [4. Install `schematic` dependencies](#4-install-schematic-dependencies) - [5. Set up configuration files](#5-set-up-configuration-files) - - [6. Obtain Google credential files](#6-obtain-google-credential-files) + - [6. Obtain Google credential files](#6-obtain-google-credential-files-1) - [7. Set up pre-commit hooks](#7-set-up-pre-commit-hooks) - [8. Verify your setup](#8-verify-your-setup) - [Command Line Usage](#command-line-usage) - [Docker Usage](#docker-usage) - - [Running the REST API](#running-the-rest-api) - - [Example 1: Using the `config.yml` path](#example-1-using-the-configyml-path) - - [Example 2: Use environment variables](#example-2-use-environment-variables) - - [Running `schematic` to Validate Manifests](#running-schematic-to-validate-manifests) - - [Example for macOS/Linux](#example-for-macoslinux) - - [Example for Windows](#example-for-windows) + - [Running the REST API](#running-the-rest-api) + - [Example 1: Using the `config.yml` path](#example-1-using-the-configyml-path) + - [Example 2: Use environment variables](#example-2-use-environment-variables) + - [Running `schematic` to Validate Manifests](#running-schematic-to-validate-manifests) + - [Example for macOS/Linux](#example-for-macoslinux) + - [Example for Windows](#example-for-windows) +- [Exporting OpenTelemetry data from schematic](#exporting-opentelemetry-data-from-schematic) + - [Exporting OpenTelemetry data for SageBionetworks employees](#exporting-opentelemetry-data-for-sagebionetworks-employees) + - [Exporting data locally](#exporting-data-locally) - [Contributors](#contributors) @@ -127,7 +130,43 @@ If you run into `ERROR: Failed building wheel for numpy`, the error might be abl pip3 install --upgrade pip ``` -### 4. Set up configuration files +### 4. Get your data model as a `JSON-LD` schema file + +Now you need a schema file, e.g. `model.jsonld`, to have a data model that schematic can work with. 
While you can download a super basic example data model [here](https://raw.githubusercontent.com/Sage-Bionetworks/schematic/refs/heads/develop/tests/data/example.model.jsonld), you’ll probably be working with a DCC-specific data model. For non-Sage employees/contributors using the CLI, you might care only about the minimum needed artifact, which is the `.jsonld`; locate and download only that from the right repo.
+
+Here are some example repos with schema files:
+* https://github.com/ncihtan/data-models/
+* https://github.com/nf-osi/nf-metadata-dictionary/
+
+### 5. Obtain Google credential files
+
+Any function that interacts with a Google Sheet (such as `schematic manifest get`) requires Google Cloud credentials.
+
+1. **Option 1**: [Here](https://scribehow.com/shared/Get_Credentials_for_Google_Drive_and_Google_Sheets_APIs_to_use_with_schematicpy__yqfcJz_rQVeyTcg0KQCINA?referrer=workspace)’s a step-by-step guide on how to create these credentials in Google Cloud.
+    * Depending on your institution's policies, your institutional Google account may or may not have the required permissions to complete this. A possible workaround is to use a personal or temporary Google account.
+
+> [!WARNING]
+> At the time of writing, Sage Bionetworks employees do not have the appropriate permissions to create projects with their Sage Bionetworks Google accounts. You would follow the instructions using a personal Google account.
+
+2. **Option 2**: Ask your DCC/development team if they have credentials previously set up with a service account.
+
+Once you have obtained credentials, be sure that the generated JSON file is named to match the `service_acct_creds` parameter in your `config.yml` file. You will find more context on the `config.yml` in section [6. Set up configuration files](#6-set-up-configuration-files).
+
+> [!NOTE]
+> Running `schematic init` is no longer supported due to security concerns. To obtain `schematic_service_account_creds.json`, please follow the instructions [here](https://scribehow.com/shared/Enable_Google_Drive_and_Google_Sheets_APIs_for_project__yqfcJz_rQVeyTcg0KQCINA).
+schematic uses Google’s API to generate Google Sheet templates that users fill in to provide (meta)data.
+Most Google Sheet functionality can be authenticated with a service account. However, more complex Google Sheet functionality
+requires token-based authentication. As browser support for token-based authentication diminishes, we hope to deprecate
+token-based authentication and keep only service account authentication in the future.
+
+> [!NOTE]
+> Use the ``schematic_service_account_creds.json`` file for the service
+> account mode of authentication (*for Google services/APIs*). Service accounts
+> are special Google accounts that can be used by applications to access Google APIs
+> programmatically via OAuth2.0, with the advantage being that they do not require
+> human authorization.
+
+### 6. Set up configuration files

The following section will walk through setting up your configuration files with your credentials to allow for communication between `schematic` and the Synapse API.
@@ -163,57 +202,22 @@ such as the Synapse ID of the main file view containing all your project assets,

Download the `config_example.yml` as a new file called `config.yml` and modify its contents according to your use case.
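One field to check right away is `service_acct_creds`, which should point at the credentials JSON obtained in step 5. A minimal sketch is shown below; the nesting under `google_sheets` follows the layout of `config_example.yml` and is an assumption to verify against your copy of that file:

```text
google_sheets:
  # Must match the filename of the credentials JSON obtained in step 5
  service_acct_creds: "schematic_service_account_creds.json"
```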
-For example, if you wanted to change the folder where manifests are downloaded your config should look like: +For example, one of the components in this `config.yml` that will likely be modified is the location of your schema. After acquiring your schema file using the +instructions in step [4. Get your data model as a `JSON-LD` schema file](#4-get-your-data-model-as-a-json-ld-schema-file), your `config.yml` should contain something like: ```text -manifest: - manifest_folder: "my_manifest_folder_path" +model: + location: "path/to/your/model.jsonld" ``` > [!IMPORTANT] -> Be sure to update your `config.yml` with the location of your `.synapseConfig` created in the step above, to avoid authentication errors. Paths can be specified relative to the `config.yml` file or as absolute paths. - -> [!NOTE] -> `config.yml` is ignored by git. - -### 5. Get your data model as a `JSON-LD` schema file - -Now you need a schema file, e.g. `model.jsonld`, to have a data model that schematic can work with. While you can download a super basic example data model [here](https://raw.githubusercontent.com/Sage-Bionetworks/schematic/refs/heads/develop/tests/data/example.model.jsonld), you’ll probably be working with a DCC-specific data model. For non-Sage employees/contributors using the CLI, you might care only about the minimum needed artifact, which is the `.jsonld`; locate and download only that from the right repo. - -Here are some example repos with schema files: -* https://github.com/ncihtan/data-models/ -* https://github.com/nf-osi/nf-metadata-dictionary/ +> Please note that for the example above, your local working directory would typically have `model.jsonld` and `config.yml` side-by-side. The path to your data model should match what is in `config.yml`. > [!IMPORTANT] -> Your local working directory would typically have `model.jsonld` and `config.yml` side-by-side. The path to your data model should match what is in `config.yml` - -### 6. Obtain Google credential files - -Any function that interacts with a google sheet (such as `schematic manifest get`) requires google cloud credentials. - -1. **Option 1**: [Here](https://scribehow.com/shared/Get_Credentials_for_Google_Drive_and_Google_Sheets_APIs_to_use_with_schematicpy__yqfcJz_rQVeyTcg0KQCINA?referrer=workspace)’s a step-by-step guide on how to create these credentials in Google Cloud. - * Depending on your institution's policies, your institutional Google account may or may not have the required permissions to complete this. A possible workaround is to use a personal or temporary Google account. - -> [!WARNING] -> At the time of writing, Sage Bionetworks employees do not have the appropriate permissions to create projects with their Sage Bionetworks Google accounts. You would follow instructions using a personal Google account. - -2. **Option 2**: Ask your DCC/development team if they have credentials previously set up with a service account. - -Once you have obtained credentials, be sure that the json file generated is named in the same way as the `service_acct_creds` parameter in your `config.yml` file. - -> [!NOTE] -> Running `schematic init` is no longer supported due to security concerns. To obtain `schematic_service_account_creds.json`, please follow the instructions [here](https://scribehow.com/shared/Enable_Google_Drive_and_Google_Sheets_APIs_for_project__yqfcJz_rQVeyTcg0KQCINA). -schematic uses Google’s API to generate google sheet templates that users fill in to provide (meta)data. 
-Most Google sheet functionality could be authenticated with service account. However, more complex Google sheet functionality
-requires token-based authentication. As browser support that requires the token-based authentication diminishes, we are hoping to deprecate
-token-based authentication and keep only service account authentication in the future.
+> Be sure to update your `config.yml` with the location of your `.synapseConfig` created in the step above, to avoid authentication errors. Paths can be specified relative to the `config.yml` file or as absolute paths.

> [!NOTE]
-> Use the ``schematic_service_account_creds.json`` file for the service
-> account mode of authentication (*for Google services/APIs*). Service accounts
-> are special Google accounts that can be used by applications to access Google APIs
-> programmatically via OAuth2.0, with the advantage being that they do not require
-> human authorization.
+> `config.yml` is ignored by git.

### 7. Verify your setup

After running the steps above, your setup is complete, and you can test it on a `python` instance or by running a command based on the examples in the [Command Line Usage](#command-line-usage) section.
@@ -269,12 +273,17 @@ poetry debug info

Before you begin, make sure you are in the latest `develop` of the repository.

-The following command will install the dependencies based on what we specify in the `poetry.lock` file of this repository. If this step is taking a long time, try to go back to Step 2 and check your version of `poetry`. Alternatively, you can try deleting the lock file and regenerate it by doing `poetry install` (Please note this method should be used as a last resort because this would force other developers to change their development environment)
+The following command will install the dependencies based on what we specify in the `poetry.lock` file of this repository (which is generated from the libraries listed in the `pyproject.toml` file). If this step is taking a long time, go back to Step 2 and check your version of `poetry`. Alternatively, you can try deleting the lock file and regenerating it by doing `poetry lock` (Please note this method should be used as a last resort because it would force other developers to change their development environment).

```
-poetry install --all-extras
+poetry install --with dev,doc
```
+This command will install:
+* The main dependencies required for running the package.
+* Development dependencies for testing, linting, and code formatting.
+* Documentation dependencies such as `sphinx` for building and maintaining documentation.
+
### 5. Set up configuration files

The following section will walk through setting up your configuration files with your credentials to allow for communication between `schematic` and the Synapse API.
@@ -481,6 +490,67 @@ docker run -v %cd%:/schematic \
 -c config.yml validate -mp tests/data/mock_manifests/inValid_Test_Manifest.csv -dt MockComponent -js /schematic/data/example.model.jsonld
```
+# Exporting OpenTelemetry data from schematic
+This section is geared towards the Sage Bionetworks-specific deployment of schematic as
+an API server running in the Sage-specific AWS account.
+
+
+Schematic is set up to produce and export OpenTelemetry data while requests are flowing
+through the application code. This may be accomplished by setting a few environment
+variables wherever the application is running. Those variables are:
+
+- `TRACING_EXPORT_FORMAT`: Determines in what format traces will be exported. Supported values: [`otlp`].
+- `LOGGING_EXPORT_FORMAT`: Determines in what format logs will be exported. Supported values: [`otlp`].
+- `TRACING_SERVICE_NAME`: The name of the service to attach for all exported traces.
+- `LOGGING_SERVICE_NAME`: The name of the service to attach for all exported logs.
+- `DEPLOYMENT_ENVIRONMENT`: The name of the environment to attach for all exported telemetry data.
+- `OTEL_EXPORTER_OTLP_ENDPOINT`: The endpoint to export telemetry data to.
+
+Authentication (OAuth2 client credential exchange):
+
+Used in cases where an intermediate OpenTelemetry collector is not used or cannot be used.
+This option is not preferred over using an intermediate OpenTelemetry collector, but is
+left in the code to show how we may export telemetry data with an authorization header
+derived from an OAuth2 client credential exchange flow.
+
+- `TELEMETRY_EXPORTER_CLIENT_ID`: The ID of the client to use when executing the OAuth2.0 "Client Credentials" flow.
+- `TELEMETRY_EXPORTER_CLIENT_SECRET`: The Secret of the client to use when executing the OAuth2.0 "Client Credentials" flow.
+- `TELEMETRY_EXPORTER_CLIENT_TOKEN_ENDPOINT`: The Token endpoint to use when executing the OAuth2.0 "Client Credentials" flow.
+- `TELEMETRY_EXPORTER_CLIENT_AUDIENCE`: The ID of the API server to use when executing the OAuth2.0 "Client Credentials" flow.
+
+Authentication (Static Bearer token)
+
+- `OTEL_EXPORTER_OTLP_HEADERS`: Used by developers to set a static Bearer token to be used when exporting telemetry data.
+
+The above configuration will work when the application is running locally, in a
+container, running in AWS, or running via CLI. The important part is that the
+environment variables are set before the code executes, as the configuration is set up
+when the code runs.
+
+## Exporting OpenTelemetry data for SageBionetworks employees
+The DPE (Data Processing & Engineering) team is responsible for maintaining and giving
+out the above sensitive information. Please reach out to the DPE team if a new ID/Secret
+is needed in order to export telemetry data in a new environment, or locally during
+development.
+
+### Exporting data locally
+In order to conserve the number of monthly token requests that can be made, the following
+process should be followed instead of setting the `TELEMETRY_EXPORTER_CLIENT_*`
+environment variables above.
+
+1) Request access from DPE to a unique client ID/Secret that identifies you.
+2) Retrieve a token that must be refreshed every 24 hours via cURL. The specific values will be given when the token is requested. Example:
+```
+curl --request POST \
+  --url https://TOKEN_URL.us.auth0.com/oauth/token \
+  --header 'content-type: application/json' \
+  --data '{"client_id":"...","client_secret":"...","audience":"...","grant_type":"client_credentials"}'
+```
+3) Set an environment variable in your `.env` file like: `OTEL_EXPORTER_OTLP_HEADERS=Authorization=Bearer ey...`
+
+If you fail to create a new access token after 24 hours, you will see HTTP 403 JWT
+Expired messages when the application attempts to export telemetry data.
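To tie the steps above together, here is a minimal sketch of what a local developer shell (or `.env` file) might look like once the token has been retrieved; the endpoint and service names are illustrative placeholders, not real values:

```
# OpenTelemetry export configuration (values are illustrative placeholders)
export TRACING_EXPORT_FORMAT=otlp
export LOGGING_EXPORT_FORMAT=otlp
export TRACING_SERVICE_NAME=schematic-api-local
export LOGGING_SERVICE_NAME=schematic-api-local
export DEPLOYMENT_ENVIRONMENT=local
export OTEL_EXPORTER_OTLP_ENDPOINT=https://example.org/telemetry
# Static token obtained from the cURL call above; refresh it every 24 hours
export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer ey..."
```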
# Contributors

Main contributors and developers:
diff --git a/env.example b/env.example
index 176c22c28..f1f56a8a4 100644
--- a/env.example
+++ b/env.example
@@ -5,9 +5,37 @@ SERVER_DOMAIN=localhost
USE_LISTEN_PORT=81
SERVICE_ACCOUNT_CREDS='Provide service account creds'

-# Integration testing variables (Optional)
+## OpenTelemetry configuration variables (Optional)
+## Only otlp is supported
# TRACING_EXPORT_FORMAT=otlp
# LOGGING_EXPORT_FORMAT=otlp
-# TRACING_SERVICE_NAME=unique-name-testing
-# LOGGING_SERVICE_NAME=unique-name-testing
-# LOGGING_INSTANCE_NAME=unique-name-testing
\ No newline at end of file
+# TRACING_SERVICE_NAME=schematic-api
+# LOGGING_SERVICE_NAME=schematic-api
+## Other examples: dev, staging, prod
+# DEPLOYMENT_ENVIRONMENT=local
+# OTEL_EXPORTER_OTLP_ENDPOINT=https://..../telemetry
+## OpenTelemetry authentication
+# TELEMETRY_EXPORTER_CLIENT_ID=...
+# TELEMETRY_EXPORTER_CLIENT_SECRET=...
+# TELEMETRY_EXPORTER_CLIENT_TOKEN_ENDPOINT=...
+# TELEMETRY_EXPORTER_CLIENT_AUDIENCE=...
+## Alternative OpenTelemetry authentication: sets a static Authorization header to use for all requests. Used when developing locally
+# OTEL_EXPORTER_OTLP_HEADERS=Authorization=Bearer ey...
+
+
+# Used during integration test runs to determine if files will be output for manual
+# inspection. These tests cannot complete all of their validation via code. All of these
+# tests will be marked with the pytest marker "manual_verification_required"
+# More information: https://sagebionetworks.jira.com/wiki/spaces/SCHEM/pages/3055779846/Schematic+API+test+plan
+MANUAL_TEST_VERIFICATION=false
+
+# Used to determine if a local flask instance is created during integration testing. If
+# this is true, schematic tests will use a schematic API server running outside of the
+# context of the integration test. The URL used is defined below.
+USE_DEPLOYED_SCHEMATIC_API_SERVER=false
+
+# The URL used to execute integration tests for the schematic API. Defaults to localhost.
+# dev: https://schematic-dev.api.sagebionetworks.org +# staging: https://schematic-staging.api.sagebionetworks.org +# prod: https://schematic.api.sagebionetworks.org +SCHEMATIC_API_SERVER_URL=http://localhost:3001 \ No newline at end of file diff --git a/main.py b/main.py index 8081a7578..f5b51bcac 100644 --- a/main.py +++ b/main.py @@ -1,7 +1,4 @@ import os -import connexion -from schematic import CONFIG -from flask_cors import CORS from schematic_api.api import app diff --git a/poetry.lock b/poetry.lock index b6b193d77..0180edc00 100644 --- a/poetry.lock +++ b/poetry.lock @@ -35,13 +35,13 @@ dev = ["black", "docutils", "flake8", "ipython", "m2r", "mistune (<2.0.0)", "pyt [[package]] name = "anyio" -version = "4.4.0" +version = "4.6.2.post1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, ] [package.dependencies] @@ -51,9 +51,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "appnope" @@ -250,17 +250,6 @@ files = [ [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - [[package]] name = "beautifulsoup4" version = "4.12.3" @@ -282,6 +271,20 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "binapy" +version = "0.8.0" +description = "Binary Data manipulation, for humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "binapy-0.8.0-py3-none-any.whl", hash = "sha256:8af1e1e856900ef8b79ef32236e296127c9cf26414ec355982ff7ce5f173504d"}, + {file = "binapy-0.8.0.tar.gz", hash = "sha256:570c5098d42f037ffb3d2e563998f3cff69ad25ca1f43f9c3815432dccd08233"}, +] + +[package.dependencies] +typing-extensions = ">=4.3.0" + [[package]] name = "black" version = "23.12.1" @@ -330,21 +333,20 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "bleach" -version = "6.1.0" +version = "6.2.0" description = "An easy safelist-based HTML-sanitizing tool." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, ] [package.dependencies] -six = ">=1.9.0" webencodings = "*" [package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] +css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "cachetools" @@ -460,101 +462,116 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = 
"charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = 
"charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = 
"charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = 
"sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = 
"charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = 
"charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -589,7 +606,7 @@ click = "*" name = "clickclick" version = "20.10.2" description = "Click utility functions" -optional = true +optional = false python-versions = "*" files = [ {file = "clickclick-20.10.2-py2.py3-none-any.whl", hash = "sha256:c8f33e6d9ec83f68416dd2136a7950125bd256ec39ccc9a85c6e280a16be2bb5"}, @@ -632,7 +649,7 @@ test = ["pytest"] name = "connexion" version = "2.14.1" description = "Connexion - API first applications with OpenAPI/Swagger and Flask" -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "connexion-2.14.1-py2.py3-none-any.whl", hash = "sha256:f343717241b4c4802a694c38fee66fb1693c897fe4ea5a957fa9b3b07caf6394"}, @@ -660,83 +677,73 @@ tests = ["MarkupSafe (>=0.23)", "aiohttp (>=2.3.10,<4)", "aiohttp-jinja2 (>=0.14 [[package]] name = "coverage" -version = "7.6.1" +version = "7.6.4" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, - {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, - {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, - {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, - {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, - {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, - {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, - {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, - {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, - {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, - 
{file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, - {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, - {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, - {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, - {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, - {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, - {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, - {file = 
"coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, + {file = "coverage-7.6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f8ae553cba74085db385d489c7a792ad66f7f9ba2ee85bfa508aeb84cf0ba07"}, + {file = "coverage-7.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8165b796df0bd42e10527a3f493c592ba494f16ef3c8b531288e3d0d72c1f6f0"}, + {file = "coverage-7.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c8b95bf47db6d19096a5e052ffca0a05f335bc63cef281a6e8fe864d450a72"}, + {file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ed9281d1b52628e81393f5eaee24a45cbd64965f41857559c2b7ff19385df51"}, + {file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0809082ee480bb8f7416507538243c8863ac74fd8a5d2485c46f0f7499f2b491"}, + {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d541423cdd416b78626b55f123412fcf979d22a2c39fce251b350de38c15c15b"}, + {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58809e238a8a12a625c70450b48e8767cff9eb67c62e6154a642b21ddf79baea"}, + {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c9b8e184898ed014884ca84c70562b4a82cbc63b044d366fedc68bc2b2f3394a"}, + {file = "coverage-7.6.4-cp310-cp310-win32.whl", hash = "sha256:6bd818b7ea14bc6e1f06e241e8234508b21edf1b242d49831831a9450e2f35fa"}, + {file = "coverage-7.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:06babbb8f4e74b063dbaeb74ad68dfce9186c595a15f11f5d5683f748fa1d172"}, + {file = "coverage-7.6.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73d2b73584446e66ee633eaad1a56aad577c077f46c35ca3283cd687b7715b0b"}, + {file = "coverage-7.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51b44306032045b383a7a8a2c13878de375117946d68dcb54308111f39775a25"}, + {file = "coverage-7.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3fb02fe73bed561fa12d279a417b432e5b50fe03e8d663d61b3d5990f29546"}, + {file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed8fe9189d2beb6edc14d3ad19800626e1d9f2d975e436f84e19efb7fa19469b"}, + {file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b369ead6527d025a0fe7bd3864e46dbee3aa8f652d48df6174f8d0bac9e26e0e"}, + {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ade3ca1e5f0ff46b678b66201f7ff477e8fa11fb537f3b55c3f0568fbfe6e718"}, + {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:27fb4a050aaf18772db513091c9c13f6cb94ed40eacdef8dad8411d92d9992db"}, + {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f704f0998911abf728a7783799444fcbbe8261c4a6c166f667937ae6a8aa522"}, + {file = "coverage-7.6.4-cp311-cp311-win32.whl", hash = "sha256:29155cd511ee058e260db648b6182c419422a0d2e9a4fa44501898cf918866cf"}, + {file = "coverage-7.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:8902dd6a30173d4ef09954bfcb24b5d7b5190cf14a43170e386979651e09ba19"}, + {file = "coverage-7.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12394842a3a8affa3ba62b0d4ab7e9e210c5e366fbac3e8b2a68636fb19892c2"}, + {file = "coverage-7.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:2b6b4c83d8e8ea79f27ab80778c19bc037759aea298da4b56621f4474ffeb117"}, + {file = "coverage-7.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d5b8007f81b88696d06f7df0cb9af0d3b835fe0c8dbf489bad70b45f0e45613"}, + {file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b57b768feb866f44eeed9f46975f3d6406380275c5ddfe22f531a2bf187eda27"}, + {file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5915fcdec0e54ee229926868e9b08586376cae1f5faa9bbaf8faf3561b393d52"}, + {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b58c672d14f16ed92a48db984612f5ce3836ae7d72cdd161001cc54512571f2"}, + {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2fdef0d83a2d08d69b1f2210a93c416d54e14d9eb398f6ab2f0a209433db19e1"}, + {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cf717ee42012be8c0cb205dbbf18ffa9003c4cbf4ad078db47b95e10748eec5"}, + {file = "coverage-7.6.4-cp312-cp312-win32.whl", hash = "sha256:7bb92c539a624cf86296dd0c68cd5cc286c9eef2d0c3b8b192b604ce9de20a17"}, + {file = "coverage-7.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:1032e178b76a4e2b5b32e19d0fd0abbce4b58e77a1ca695820d10e491fa32b08"}, + {file = "coverage-7.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:023bf8ee3ec6d35af9c1c6ccc1d18fa69afa1cb29eaac57cb064dbb262a517f9"}, + {file = "coverage-7.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0ac3d42cb51c4b12df9c5f0dd2f13a4f24f01943627120ec4d293c9181219ba"}, + {file = "coverage-7.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8fe4984b431f8621ca53d9380901f62bfb54ff759a1348cd140490ada7b693c"}, + {file = "coverage-7.6.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fbd612f8a091954a0c8dd4c0b571b973487277d26476f8480bfa4b2a65b5d06"}, + {file = "coverage-7.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dacbc52de979f2823a819571f2e3a350a7e36b8cb7484cdb1e289bceaf35305f"}, + {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dab4d16dfef34b185032580e2f2f89253d302facba093d5fa9dbe04f569c4f4b"}, + {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:862264b12ebb65ad8d863d51f17758b1684560b66ab02770d4f0baf2ff75da21"}, + {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5beb1ee382ad32afe424097de57134175fea3faf847b9af002cc7895be4e2a5a"}, + {file = "coverage-7.6.4-cp313-cp313-win32.whl", hash = "sha256:bf20494da9653f6410213424f5f8ad0ed885e01f7e8e59811f572bdb20b8972e"}, + {file = "coverage-7.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:182e6cd5c040cec0a1c8d415a87b67ed01193ed9ad458ee427741c7d8513d963"}, + {file = "coverage-7.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a181e99301a0ae128493a24cfe5cfb5b488c4e0bf2f8702091473d033494d04f"}, + {file = "coverage-7.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:df57bdbeffe694e7842092c5e2e0bc80fff7f43379d465f932ef36f027179806"}, + {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bcd1069e710600e8e4cf27f65c90c7843fa8edfb4520fb0ccb88894cad08b11"}, + {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:99b41d18e6b2a48ba949418db48159d7a2e81c5cc290fc934b7d2380515bd0e3"}, + {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1e54712ba3474f34b7ef7a41e65bd9037ad47916ccb1cc78769bae324c01a"}, + {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:53d202fd109416ce011578f321460795abfe10bb901b883cafd9b3ef851bacfc"}, + {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:c48167910a8f644671de9f2083a23630fbf7a1cb70ce939440cd3328e0919f70"}, + {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc8ff50b50ce532de2fa7a7daae9dd12f0a699bfcd47f20945364e5c31799fef"}, + {file = "coverage-7.6.4-cp313-cp313t-win32.whl", hash = "sha256:b8d3a03d9bfcaf5b0141d07a88456bb6a4c3ce55c080712fec8418ef3610230e"}, + {file = "coverage-7.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:f3ddf056d3ebcf6ce47bdaf56142af51bb7fad09e4af310241e9db7a3a8022e1"}, + {file = "coverage-7.6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cb7fa111d21a6b55cbf633039f7bc2749e74932e3aa7cb7333f675a58a58bf3"}, + {file = "coverage-7.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11a223a14e91a4693d2d0755c7a043db43d96a7450b4f356d506c2562c48642c"}, + {file = "coverage-7.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a413a096c4cbac202433c850ee43fa326d2e871b24554da8327b01632673a076"}, + {file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00a1d69c112ff5149cabe60d2e2ee948752c975d95f1e1096742e6077affd376"}, + {file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f76846299ba5c54d12c91d776d9605ae33f8ae2b9d1d3c3703cf2db1a67f2c0"}, + {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fe439416eb6380de434886b00c859304338f8b19f6f54811984f3420a2e03858"}, + {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0294ca37f1ba500667b1aef631e48d875ced93ad5e06fa665a3295bdd1d95111"}, + {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6f01ba56b1c0e9d149f9ac85a2f999724895229eb36bd997b61e62999e9b0901"}, + {file = "coverage-7.6.4-cp39-cp39-win32.whl", hash = "sha256:bc66f0bf1d7730a17430a50163bb264ba9ded56739112368ba985ddaa9c3bd09"}, + {file = "coverage-7.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:c481b47f6b5845064c65a7bc78bc0860e635a9b055af0df46fdf1c58cebf8e8f"}, + {file = "coverage-7.6.4-pp39.pp310-none-any.whl", hash = "sha256:3c65d37f3a9ebb703e710befdc489a38683a5b152242664b973a7b7b22348a4e"}, + {file = "coverage-7.6.4.tar.gz", hash = "sha256:29fc0f17b1d3fea332f8001d4558f8214af7f1d87a345f3a133c901d60347c73"}, ] [package.dependencies] @@ -829,33 +836,37 @@ langdetect = ["langdetect"] [[package]] name = "debugpy" -version = "1.8.5" +version = "1.8.8" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.5-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:7e4d594367d6407a120b76bdaa03886e9eb652c05ba7f87e37418426ad2079f7"}, - {file = "debugpy-1.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4413b7a3ede757dc33a273a17d685ea2b0c09dbd312cc03f5534a0fd4d40750a"}, - {file = "debugpy-1.8.5-cp310-cp310-win32.whl", hash = "sha256:dd3811bd63632bb25eda6bd73bea8e0521794cda02be41fa3160eb26fc29e7ed"}, - {file = 
"debugpy-1.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:b78c1250441ce893cb5035dd6f5fc12db968cc07f91cc06996b2087f7cefdd8e"}, - {file = "debugpy-1.8.5-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:606bccba19f7188b6ea9579c8a4f5a5364ecd0bf5a0659c8a5d0e10dcee3032a"}, - {file = "debugpy-1.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db9fb642938a7a609a6c865c32ecd0d795d56c1aaa7a7a5722d77855d5e77f2b"}, - {file = "debugpy-1.8.5-cp311-cp311-win32.whl", hash = "sha256:4fbb3b39ae1aa3e5ad578f37a48a7a303dad9a3d018d369bc9ec629c1cfa7408"}, - {file = "debugpy-1.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:345d6a0206e81eb68b1493ce2fbffd57c3088e2ce4b46592077a943d2b968ca3"}, - {file = "debugpy-1.8.5-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:5b5c770977c8ec6c40c60d6f58cacc7f7fe5a45960363d6974ddb9b62dbee156"}, - {file = "debugpy-1.8.5-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a65b00b7cdd2ee0c2cf4c7335fef31e15f1b7056c7fdbce9e90193e1a8c8cb"}, - {file = "debugpy-1.8.5-cp312-cp312-win32.whl", hash = "sha256:c9f7c15ea1da18d2fcc2709e9f3d6de98b69a5b0fff1807fb80bc55f906691f7"}, - {file = "debugpy-1.8.5-cp312-cp312-win_amd64.whl", hash = "sha256:28ced650c974aaf179231668a293ecd5c63c0a671ae6d56b8795ecc5d2f48d3c"}, - {file = "debugpy-1.8.5-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:3df6692351172a42af7558daa5019651f898fc67450bf091335aa8a18fbf6f3a"}, - {file = "debugpy-1.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd04a73eb2769eb0bfe43f5bfde1215c5923d6924b9b90f94d15f207a402226"}, - {file = "debugpy-1.8.5-cp38-cp38-win32.whl", hash = "sha256:8f913ee8e9fcf9d38a751f56e6de12a297ae7832749d35de26d960f14280750a"}, - {file = "debugpy-1.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:a697beca97dad3780b89a7fb525d5e79f33821a8bc0c06faf1f1289e549743cf"}, - {file = "debugpy-1.8.5-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:0a1029a2869d01cb777216af8c53cda0476875ef02a2b6ff8b2f2c9a4b04176c"}, - {file = "debugpy-1.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84c276489e141ed0b93b0af648eef891546143d6a48f610945416453a8ad406"}, - {file = "debugpy-1.8.5-cp39-cp39-win32.whl", hash = "sha256:ad84b7cde7fd96cf6eea34ff6c4a1b7887e0fe2ea46e099e53234856f9d99a34"}, - {file = "debugpy-1.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:7b0fe36ed9d26cb6836b0a51453653f8f2e347ba7348f2bbfe76bfeb670bfb1c"}, - {file = "debugpy-1.8.5-py2.py3-none-any.whl", hash = "sha256:55919dce65b471eff25901acf82d328bbd5b833526b6c1364bd5133754777a44"}, - {file = "debugpy-1.8.5.zip", hash = "sha256:b2112cfeb34b4507399d298fe7023a16656fc553ed5246536060ca7bd0e668d0"}, + {file = "debugpy-1.8.8-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:e59b1607c51b71545cb3496876544f7186a7a27c00b436a62f285603cc68d1c6"}, + {file = "debugpy-1.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6531d952b565b7cb2fbd1ef5df3d333cf160b44f37547a4e7cf73666aca5d8d"}, + {file = "debugpy-1.8.8-cp310-cp310-win32.whl", hash = "sha256:b01f4a5e5c5fb1d34f4ccba99a20ed01eabc45a4684f4948b5db17a319dfb23f"}, + {file = "debugpy-1.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:535f4fb1c024ddca5913bb0eb17880c8f24ba28aa2c225059db145ee557035e9"}, + {file = "debugpy-1.8.8-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:c399023146e40ae373753a58d1be0a98bf6397fadc737b97ad612886b53df318"}, + {file = 
"debugpy-1.8.8-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09cc7b162586ea2171eea055985da2702b0723f6f907a423c9b2da5996ad67ba"}, + {file = "debugpy-1.8.8-cp311-cp311-win32.whl", hash = "sha256:eea8821d998ebeb02f0625dd0d76839ddde8cbf8152ebbe289dd7acf2cdc6b98"}, + {file = "debugpy-1.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:d4483836da2a533f4b1454dffc9f668096ac0433de855f0c22cdce8c9f7e10c4"}, + {file = "debugpy-1.8.8-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:0cc94186340be87b9ac5a707184ec8f36547fb66636d1029ff4f1cc020e53996"}, + {file = "debugpy-1.8.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64674e95916e53c2e9540a056e5f489e0ad4872645399d778f7c598eacb7b7f9"}, + {file = "debugpy-1.8.8-cp312-cp312-win32.whl", hash = "sha256:5c6e885dbf12015aed73770f29dec7023cb310d0dc2ba8bfbeb5c8e43f80edc9"}, + {file = "debugpy-1.8.8-cp312-cp312-win_amd64.whl", hash = "sha256:19ffbd84e757a6ca0113574d1bf5a2298b3947320a3e9d7d8dc3377f02d9f864"}, + {file = "debugpy-1.8.8-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:705cd123a773d184860ed8dae99becd879dfec361098edbefb5fc0d3683eb804"}, + {file = "debugpy-1.8.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890fd16803f50aa9cb1a9b9b25b5ec321656dd6b78157c74283de241993d086f"}, + {file = "debugpy-1.8.8-cp313-cp313-win32.whl", hash = "sha256:90244598214bbe704aa47556ec591d2f9869ff9e042e301a2859c57106649add"}, + {file = "debugpy-1.8.8-cp313-cp313-win_amd64.whl", hash = "sha256:4b93e4832fd4a759a0c465c967214ed0c8a6e8914bced63a28ddb0dd8c5f078b"}, + {file = "debugpy-1.8.8-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:143ef07940aeb8e7316de48f5ed9447644da5203726fca378f3a6952a50a9eae"}, + {file = "debugpy-1.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f95651bdcbfd3b27a408869a53fbefcc2bcae13b694daee5f1365b1b83a00113"}, + {file = "debugpy-1.8.8-cp38-cp38-win32.whl", hash = "sha256:26b461123a030e82602a750fb24d7801776aa81cd78404e54ab60e8b5fecdad5"}, + {file = "debugpy-1.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3cbf1833e644a3100eadb6120f25be8a532035e8245584c4f7532937edc652a"}, + {file = "debugpy-1.8.8-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:53709d4ec586b525724819dc6af1a7703502f7e06f34ded7157f7b1f963bb854"}, + {file = "debugpy-1.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a9c013077a3a0000e83d97cf9cc9328d2b0bbb31f56b0e99ea3662d29d7a6a2"}, + {file = "debugpy-1.8.8-cp39-cp39-win32.whl", hash = "sha256:ffe94dd5e9a6739a75f0b85316dc185560db3e97afa6b215628d1b6a17561cb2"}, + {file = "debugpy-1.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5c0e5a38c7f9b481bf31277d2f74d2109292179081f11108e668195ef926c0f9"}, + {file = "debugpy-1.8.8-py2.py3-none-any.whl", hash = "sha256:ec684553aba5b4066d4de510859922419febc710df7bba04fe9e7ef3de15d34f"}, + {file = "debugpy-1.8.8.zip", hash = "sha256:e6355385db85cbd666be703a96ab7351bc9e6c61d694893206f8001e22aee091"}, ] [[package]] @@ -897,29 +908,15 @@ wrapt = ">=1.10,<2" [package.extras] dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] -[[package]] -name = "deprecation" -version = "2.1.0" -description = "A library to handle automated deprecations" -optional = false -python-versions = "*" -files = [ - {file = "deprecation-2.1.0-py2.py3-none-any.whl", 
hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, - {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, -] - -[package.dependencies] -packaging = "*" - [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] [package.extras] @@ -928,13 +925,13 @@ profile = ["gprof2dot (>=2022.7.29)"] [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -961,13 +958,13 @@ files = [ [[package]] name = "et-xmlfile" -version = "1.1.0" +version = "2.0.0" description = "An implementation of lxml.xmlfile for the standard library" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"}, - {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"}, + {file = "et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa"}, + {file = "et_xmlfile-2.0.0.tar.gz", hash = "sha256:dab3f4764309081ce75662649be815c4c9081e88f0837825f90fd28317d4da54"}, ] [[package]] @@ -1028,19 +1025,19 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "flake8" @@ -1062,7 +1059,7 @@ pyflakes = ">=3.1.0,<3.2.0" name = "flask" version = "2.1.3" description = "A simple framework for building complex web applications." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "Flask-2.1.3-py3-none-any.whl", hash = "sha256:9013281a7402ad527f8fd56375164f3aa021ecfaff89bfe3825346c24f87e04c"}, @@ -1084,7 +1081,7 @@ dotenv = ["python-dotenv"] name = "flask-cors" version = "3.0.10" description = "A Flask extension adding a decorator for CORS support" -optional = true +optional = false python-versions = "*" files = [ {file = "Flask-Cors-3.0.10.tar.gz", hash = "sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de"}, @@ -1095,24 +1092,6 @@ files = [ Flask = ">=0.9" Six = "*" -[[package]] -name = "flask-opentracing" -version = "2.0.0" -description = "OpenTracing support for Flask applications" -optional = true -python-versions = "*" -files = [ - {file = "Flask-OpenTracing-2.0.0.tar.gz", hash = "sha256:4de9db3d4f0d2b506ce3874fc721278d41b2e8b0125ea567164be0100df502fe"}, - {file = "Flask_OpenTracing-2.0.0-py3-none-any.whl", hash = "sha256:e7086ffb3531a518c6e3bf2b365af4a51e56a0922fdd5ebe91c9ddeeda632e70"}, -] - -[package.dependencies] -Flask = "*" -opentracing = ">=2.0,<3" - -[package.extras] -tests = ["flake8", "flake8-quotes", "mock", "pytest", "pytest-cov", "tox"] - [[package]] name = "fqdn" version = "1.5.1" @@ -1124,15 +1103,30 @@ files = [ {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, ] +[[package]] +name = "furl" +version = "2.1.3" +description = "URL manipulation made simple." 
+optional = false +python-versions = "*" +files = [ + {file = "furl-2.1.3-py2.py3-none-any.whl", hash = "sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0"}, + {file = "furl-2.1.3.tar.gz", hash = "sha256:5a6188fe2666c484a12159c18be97a1977a71d632ef5bb867ef15f54af39cc4e"}, +] + +[package.dependencies] +orderedmultidict = ">=1.0.1" +six = ">=1.8.0" + [[package]] name = "google-api-core" -version = "2.19.2" +version = "2.23.0" description = "Google API client core library" optional = false python-versions = ">=3.7" files = [ - {file = "google_api_core-2.19.2-py3-none-any.whl", hash = "sha256:53ec0258f2837dd53bbd3d3df50f5359281b3cc13f800c941dd15a9b5a415af4"}, - {file = "google_api_core-2.19.2.tar.gz", hash = "sha256:ca07de7e8aa1c98a8bfca9321890ad2340ef7f2eb136e558cee68f24b94b0a8f"}, + {file = "google_api_core-2.23.0-py3-none-any.whl", hash = "sha256:c20100d4c4c41070cf365f1d8ddf5365915291b5eb11b83829fbd1c999b5122f"}, + {file = "google_api_core-2.23.0.tar.gz", hash = "sha256:2ceb087315e6af43f256704b871d99326b1f12a9d6ce99beaedec99ba26a0ace"}, ] [package.dependencies] @@ -1143,6 +1137,7 @@ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4 requests = ">=2.18.0,<3.0.0.dev0" [package.extras] +async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"] grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] @@ -1167,13 +1162,13 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.34.0" +version = "2.36.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"}, - {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"}, + {file = "google_auth-2.36.0-py2.py3-none-any.whl", hash = "sha256:51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb"}, + {file = "google_auth-2.36.0.tar.gz", hash = "sha256:545e9618f2df0bcbb7dcbc45a546485b1212624716975a1ea5ae8149ce769ab1"}, ] [package.dependencies] @@ -1223,13 +1218,13 @@ tool = ["click (>=6.0.0)"] [[package]] name = "googleapis-common-protos" -version = "1.65.0" +version = "1.66.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, - {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, + {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, + {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, ] [package.dependencies] @@ -1328,135 +1323,6 @@ test = ["black[jupyter] (==22.3.0)", "boto3 (==1.17.106)", "docstring-parser (== trino = ["sqlalchemy (>=1.3.18,<2.0.0)", "trino (>=0.310.0,!=0.316.0)"] vertica = ["sqlalchemy (>=1.3.18,<2.0.0)", "sqlalchemy-vertica-python (>=0.5.10)"] -[[package]] -name = "greenlet" -version = "3.0.3" -description = "Lightweight in-process concurrent programming" -optional = false 
-python-versions = ">=3.7" -files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - 
{file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, -] - -[package.extras] -docs = ["Sphinx", "furo"] -test = ["objgraph", "psutil"] - -[[package]] -name = "grpcio" -version = "1.66.1" -description = "HTTP/2-based RPC framework" -optional = true -python-versions = ">=3.8" -files = [ - {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"}, - {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"}, - {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"}, - {file = 
"grpcio-1.66.1-cp310-cp310-win_amd64.whl", hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"}, - {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"}, - {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"}, - {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"}, - {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = "sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"}, - {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"}, - {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"}, - {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"}, - {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"}, - {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"}, - {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"}, - 
{file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"}, - {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = "sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"}, - {file = "grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = "sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"}, - {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"}, - {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"}, - {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"}, - {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"}, - {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.66.1)"] - [[package]] name = "h11" version = "0.14.0" @@ -1470,13 +1336,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.6" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, ] [package.dependencies] @@ -1487,7 +1353,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httplib2" @@ -1530,13 +1396,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "identify" -version = "2.6.0" +version = "2.6.2" description = "File identification library for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, - {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, + {file = "identify-2.6.2-py2.py3-none-any.whl", hash = "sha256:c097384259f49e372f4ea00a19719d95ae27dd5ff0fd77ad630aa891306b82f3"}, + {file = "identify-2.6.2.tar.gz", hash = "sha256:fab5c716c24d7a789775228823797296a2994b075fb6080ac83a102772a98cbd"}, ] [package.extras] @@ -1544,15 +1410,18 @@ license = ["ukkonen"] [[package]] name = "idna" -version = "3.8" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "imagesize" version = "1.4.1" @@ -1566,22 +1435,26 @@ files = [ [[package]] name = "importlib-metadata" -version = "6.11.0" +version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, - {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [package.dependencies] -zipp = ">=0.5" +zipp = ">=3.20" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift 
(>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "inflection" @@ -1605,31 +1478,6 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "interrogate" -version = "1.7.0" -description = "Interrogate a codebase for docstring coverage." -optional = false -python-versions = ">=3.8" -files = [ - {file = "interrogate-1.7.0-py3-none-any.whl", hash = "sha256:b13ff4dd8403369670e2efe684066de9fcb868ad9d7f2b4095d8112142dc9d12"}, - {file = "interrogate-1.7.0.tar.gz", hash = "sha256:a320d6ec644dfd887cc58247a345054fc4d9f981100c45184470068f4b3719b0"}, -] - -[package.dependencies] -attrs = "*" -click = ">=7.1" -colorama = "*" -py = "*" -tabulate = "*" -tomli = {version = "*", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["cairosvg", "coverage[toml]", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "sphinx", "sphinx-autobuild", "wheel"] -docs = ["sphinx", "sphinx-autobuild"] -png = ["cairosvg"] -tests = ["coverage[toml]", "pytest", "pytest-cov", "pytest-mock"] - [[package]] name = "ipykernel" version = "6.29.5" @@ -1774,43 +1622,24 @@ files = [ {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, ] -[[package]] -name = "jaeger-client" -version = "4.8.0" -description = "Jaeger Python OpenTracing Tracer implementation" -optional = true -python-versions = ">=3.7" -files = [ - {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, -] - -[package.dependencies] -opentracing = ">=2.1,<3.0" -threadloop = ">=1,<2" -thrift = "*" -tornado = ">=4.3" - -[package.extras] -tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"] - [[package]] name = "jedi" -version = "0.19.1" +version = "0.19.2" description = "An autocompletion tool for Python that can be used for text editors." 
optional = false python-versions = ">=3.6" files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, ] [package.dependencies] -parso = ">=0.8.3,<0.9.0" +parso = ">=0.8.4,<0.9.0" [package.extras] docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] [[package]] name = "jinja2" @@ -1831,15 +1660,18 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "json5" -version = "0.9.25" +version = "0.9.28" description = "A Python implementation of the JSON5 data format." optional = false -python-versions = ">=3.8" +python-versions = ">=3.8.0" files = [ - {file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"}, - {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, + {file = "json5-0.9.28-py3-none-any.whl", hash = "sha256:29c56f1accdd8bc2e037321237662034a7e07921e2b7223281a5ce2c46f0c4df"}, + {file = "json5-0.9.28.tar.gz", hash = "sha256:1f82f36e615bc5b42f1bbd49dbc94b12563c56408c6ffa06414ea310890e9a6e"}, ] +[package.extras] +dev = ["build (==1.2.2.post1)", "coverage (==7.5.3)", "mypy (==1.13.0)", "pip (==24.3.1)", "pylint (==3.2.3)", "ruff (==0.7.3)", "twine (==5.1.1)", "uv (==0.5.1)"] + [[package]] name = "jsonpatch" version = "1.33" @@ -1896,13 +1728,13 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- [[package]] name = "jsonschema-specifications" -version = "2023.12.1" +version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, - {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, + {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, + {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, ] [package.dependencies] @@ -1910,13 +1742,13 @@ referencing = ">=0.31.0" [[package]] name = "jupyter-client" -version = "8.6.2" 
+version = "8.6.3" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, - {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, ] [package.dependencies] @@ -2129,6 +1961,22 @@ files = [ {file = "jupyterlab_widgets-3.0.13.tar.gz", hash = "sha256:a2966d385328c1942b683a8cd96b89b8dd82c8b8f81dda902bb2bc06d46f5bed"}, ] +[[package]] +name = "jwskate" +version = "0.11.1" +description = "A Pythonic implementation of the JOSE / JSON Web Crypto related RFCs (JWS, JWK, JWA, JWT, JWE)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jwskate-0.11.1-py3-none-any.whl", hash = "sha256:cdfa04fac10366afab08c20d2f75d1c6b57dc7d099b407b8fb4318349272f933"}, + {file = "jwskate-0.11.1.tar.gz", hash = "sha256:35354b487c8e835fdd57befea5e93e9e52fe25869d884fc764511d22061e6685"}, +] + +[package.dependencies] +binapy = ">=0.8" +cryptography = ">=3.4" +typing-extensions = ">=4.3" + [[package]] name = "lazy-object-proxy" version = "1.10.0" @@ -2177,102 +2025,103 @@ files = [ [[package]] name = "makefun" -version = "1.15.4" +version = "1.15.6" description = "Small library to dynamically create python functions." optional = false python-versions = "*" files = [ - {file = "makefun-1.15.4-py2.py3-none-any.whl", hash = "sha256:945d078a7e01a903f2cbef738b33e0ebc52b8d35fb7e20c528ed87b5c80db5b7"}, - {file = "makefun-1.15.4.tar.gz", hash = "sha256:9f9b9904e7c397759374a88f4c57781fbab2a458dec78df4b3ee6272cd9fb010"}, + {file = "makefun-1.15.6-py2.py3-none-any.whl", hash = "sha256:e69b870f0bb60304765b1e3db576aaecf2f9b3e5105afe8cfeff8f2afe6ad067"}, + {file = "makefun-1.15.6.tar.gz", hash = "sha256:26bc63442a6182fb75efed8b51741dd2d1db2f176bec8c64e20a586256b8f149"}, ] [[package]] name = "markupsafe" -version = "2.1.5" +version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = 
"MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = 
"MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = 
"MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] [[package]] name = "marshmallow" -version = "3.22.0" +version = "3.23.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, - {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, + {file = "marshmallow-3.23.1-py3-none-any.whl", hash = "sha256:fece2eb2c941180ea1b7fcbd4a83c51bfdd50093fdd3ad2585ee5e1df2508491"}, + {file = "marshmallow-3.23.1.tar.gz", hash = "sha256:3a8dfda6edd8dcdbf216c0ede1d1e78d230a6dc9c5a088f58c4083b974a0d468"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.14)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "simplejson"] [[package]] name = "matplotlib-inline" @@ -2312,38 +2161,43 @@ files = [ [[package]] name = "mypy" -version = "1.11.2" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, - {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, - {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, - {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, - 
{file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, - {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, - {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, - {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, - {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, - {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, - {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, - {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, - {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, - {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, - {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, - {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, - {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, - {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, - {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = 
"mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] @@ -2353,6 +2207,7 @@ typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -2462,21 +2317,21 @@ files = [ [[package]] name = "networkx" -version = "2.8.8" +version = "3.2.1" description = "Python package for creating and manipulating graphs and networks" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "networkx-2.8.8-py3-none-any.whl", hash = "sha256:e435dfa75b1d7195c7b8378c3859f0445cd88c6b0375c181ed66823a9ceb7524"}, - {file = "networkx-2.8.8.tar.gz", hash = "sha256:230d388117af870fce5647a3c52401fcf753e94720e6ea6b4197a5355648885e"}, + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, ] [package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.19)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=0.982)", "pre-commit (>=2.20)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.2)", "pydata-sphinx-theme (>=0.11)", "sphinx (>=5.2)", "sphinx-gallery (>=0.11)", "texext (>=0.6.6)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.9)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "nodeenv" @@ -2624,135 +2479,252 @@ et-xmlfile = "*" [[package]] name = "opentelemetry-api" -version = "1.21.0" +version = "1.28.1" description = "OpenTelemetry Python API" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, - {file = "opentelemetry_api-1.21.0.tar.gz", hash = 
"sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"}, + {file = "opentelemetry_api-1.28.1-py3-none-any.whl", hash = "sha256:bfe86c95576cf19a914497f439fd79c9553a38de0adbdc26f7cfc46b0c00b16c"}, + {file = "opentelemetry_api-1.28.1.tar.gz", hash = "sha256:6fa7295a12c707f5aebef82da3d9ec5afe6992f3e42bfe7bec0339a44b3518e7"}, ] [package.dependencies] deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<7.0" +importlib-metadata = ">=6.0,<=8.5.0" [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.21.0" +version = "1.28.1" description = "OpenTelemetry Protobuf encoding" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.21.0-py3-none-any.whl", hash = "sha256:97b1022b38270ec65d11fbfa348e0cd49d12006485c2321ea3b1b7037d42b6ec"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.21.0.tar.gz", hash = "sha256:61db274d8a68d636fb2ec2a0f281922949361cdd8236e25ff5539edf942b3226"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.28.1-py3-none-any.whl", hash = "sha256:56ea6cf28c90f767733f046a54525dc7271a25faff86b1955e5252b55f4e007f"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.28.1.tar.gz", hash = "sha256:6e55e7f5d59296cc87a74c08b8e0ddf87403f73a62302ec7ee042c1a1f4a8f70"}, ] [package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -opentelemetry-proto = "1.21.0" +opentelemetry-proto = "1.28.1" [[package]] -name = "opentelemetry-exporter-otlp-proto-grpc" -version = "1.21.0" -description = "OpenTelemetry Collector Protobuf over gRPC Exporter" -optional = true -python-versions = ">=3.7" +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.28.1" +description = "OpenTelemetry Collector Protobuf over HTTP Exporter" +optional = false +python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_grpc-1.21.0-py3-none-any.whl", hash = "sha256:ab37c63d6cb58d6506f76d71d07018eb1f561d83e642a8f5aa53dddf306087a4"}, - {file = "opentelemetry_exporter_otlp_proto_grpc-1.21.0.tar.gz", hash = "sha256:a497c5611245a2d17d9aa1e1cbb7ab567843d53231dcc844a62cea9f0924ffa7"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.28.1-py3-none-any.whl", hash = "sha256:f09a684c7b9d9a451323560c61564345c253c6bb3426f6a94db31ba5f428e778"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.28.1.tar.gz", hash = "sha256:f4c21d380f2dd8ddbe4d456d8728853bc1131eb977bac1d0becc838e2086b506"}, ] [package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} deprecated = ">=1.2.6" googleapis-common-protos = ">=1.52,<2.0" -grpcio = ">=1.0.0,<2.0.0" opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.21.0" -opentelemetry-proto = "1.21.0" -opentelemetry-sdk = ">=1.21.0,<1.22.0" +opentelemetry-exporter-otlp-proto-common = "1.28.1" +opentelemetry-proto = "1.28.1" +opentelemetry-sdk = ">=1.28.1,<1.29.0" +requests = ">=2.7,<3.0" + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.49b1" +description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation-0.49b1-py3-none-any.whl", hash = "sha256:0a9d3821736104013693ef3b8a9d29b41f2f3a81ee2d8c9288b52d62bae5747c"}, + {file = "opentelemetry_instrumentation-0.49b1.tar.gz", hash = "sha256:2d0e41181b7957ba061bb436b969ad90545ac3eba65f290830009b4264d2824e"}, +] + +[package.dependencies] +opentelemetry-api = 
">=1.4,<2.0" +opentelemetry-semantic-conventions = "0.49b1" +packaging = ">=18.0" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name = "opentelemetry-instrumentation-flask" +version = "0.49b1" +description = "Flask instrumentation for OpenTelemetry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_flask-0.49b1-py3-none-any.whl", hash = "sha256:e3abb8aaccb86372bfddaa894fa9b4c6cc8c1ac2e023e0bb64c97f07d9df3d28"}, + {file = "opentelemetry_instrumentation_flask-0.49b1.tar.gz", hash = "sha256:97a91f1539fb841f774fd3e9545b0f11707e4b7d48083aa51e27fb6d527615dc"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.49b1" +opentelemetry-instrumentation-wsgi = "0.49b1" +opentelemetry-semantic-conventions = "0.49b1" +opentelemetry-util-http = "0.49b1" +packaging = ">=21.0" [package.extras] -test = ["pytest-grpc"] +instruments = ["flask (>=1.0)"] [[package]] -name = "opentelemetry-exporter-otlp-proto-http" -version = "1.21.0" -description = "OpenTelemetry Collector Protobuf over HTTP Exporter" +name = "opentelemetry-instrumentation-httpx" +version = "0.49b1" +description = "OpenTelemetry HTTPX Instrumentation" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_http-1.21.0-py3-none-any.whl", hash = "sha256:56837773de6fb2714c01fc4895caebe876f6397bbc4d16afddf89e1299a55ee2"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.21.0.tar.gz", hash = "sha256:19d60afa4ae8597f7ef61ad75c8b6c6b7ef8cb73a33fb4aed4dbc86d5c8d3301"}, + {file = "opentelemetry_instrumentation_httpx-0.49b1-py3-none-any.whl", hash = "sha256:7c620c6dd8e5fecddc5a8bb5f5cc1c4c758a031b13703e75cbb8e5abdd4297de"}, + {file = "opentelemetry_instrumentation_httpx-0.49b1.tar.gz", hash = "sha256:82285093b68bf0dc89e424f4c201c9524f0d29b9ba326fb0993721e358617710"}, ] [package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -deprecated = ">=1.2.6" -googleapis-common-protos = ">=1.52,<2.0" -opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.21.0" -opentelemetry-proto = "1.21.0" -opentelemetry-sdk = ">=1.21.0,<1.22.0" -requests = ">=2.7,<3.0" +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.49b1" +opentelemetry-semantic-conventions = "0.49b1" +opentelemetry-util-http = "0.49b1" +wrapt = ">=1.0.0,<2.0.0" + +[package.extras] +instruments = ["httpx (>=0.18.0)"] + +[[package]] +name = "opentelemetry-instrumentation-requests" +version = "0.49b1" +description = "OpenTelemetry requests instrumentation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_requests-0.49b1-py3-none-any.whl", hash = "sha256:4a7f8321f9cca5b4da3a96b63dde0c7a41775302f4e9e0267a775f9800efac59"}, + {file = "opentelemetry_instrumentation_requests-0.49b1.tar.gz", hash = "sha256:329726afd607e1078e80bc2fb43741bd73e77c2d36804d9250d965858df8bd36"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.49b1" +opentelemetry-semantic-conventions = "0.49b1" +opentelemetry-util-http = "0.49b1" [package.extras] -test = ["responses (==0.22.0)"] +instruments = ["requests (>=2.0,<3.0)"] + +[[package]] +name = "opentelemetry-instrumentation-threading" +version = "0.49b1" +description = "Thread context propagation support for OpenTelemetry" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"opentelemetry_instrumentation_threading-0.49b1-py3-none-any.whl", hash = "sha256:c94d4088a4aae9f957e0b91ee0cf1df84644f169ad33fd84d16240cabd2e818d"}, + {file = "opentelemetry_instrumentation_threading-0.49b1.tar.gz", hash = "sha256:faa2402c0f935886cf49d159e6d79b8f48a2d73998d27b8c933bdef53fb2ed1e"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.49b1" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name = "opentelemetry-instrumentation-urllib" +version = "0.49b1" +description = "OpenTelemetry urllib instrumentation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_urllib-0.49b1-py3-none-any.whl", hash = "sha256:0baf74dc2ca613d6d78751f303cc0855580fca166082610e57eed3dc374ca8fb"}, + {file = "opentelemetry_instrumentation_urllib-0.49b1.tar.gz", hash = "sha256:8cba9b9da5071ef4e305a43b12d482dd11d6c9a9807242cfafce31b1aed0d4e8"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.49b1" +opentelemetry-semantic-conventions = "0.49b1" +opentelemetry-util-http = "0.49b1" + +[[package]] +name = "opentelemetry-instrumentation-wsgi" +version = "0.49b1" +description = "WSGI Middleware for OpenTelemetry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_wsgi-0.49b1-py3-none-any.whl", hash = "sha256:6ab07115dc5c38f9c5b368e1ae4d9741cddeeef857ad01b211ee314a72ffdbea"}, + {file = "opentelemetry_instrumentation_wsgi-0.49b1.tar.gz", hash = "sha256:e1dd9a6e10b0a4baa1afd17c75b0836f9e3fd1d40c3d0d5287e898d49436ac34"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.49b1" +opentelemetry-semantic-conventions = "0.49b1" +opentelemetry-util-http = "0.49b1" [[package]] name = "opentelemetry-proto" -version = "1.21.0" +version = "1.28.1" description = "OpenTelemetry Python Proto" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_proto-1.21.0-py3-none-any.whl", hash = "sha256:32fc4248e83eebd80994e13963e683f25f3b443226336bb12b5b6d53638f50ba"}, - {file = "opentelemetry_proto-1.21.0.tar.gz", hash = "sha256:7d5172c29ed1b525b5ecf4ebe758c7138a9224441b3cfe683d0a237c33b1941f"}, + {file = "opentelemetry_proto-1.28.1-py3-none-any.whl", hash = "sha256:cb406ec69f1d11439e60fb43c6b744783fc8ee4deecdab61b3e29f112b0602f9"}, + {file = "opentelemetry_proto-1.28.1.tar.gz", hash = "sha256:6f9e9d9958822ab3e3cdcd2a24806d62aa10282349fd4338aafe32c69c87fc15"}, ] [package.dependencies] -protobuf = ">=3.19,<5.0" +protobuf = ">=5.0,<6.0" [[package]] name = "opentelemetry-sdk" -version = "1.21.0" +version = "1.28.1" description = "OpenTelemetry Python SDK" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, - {file = "opentelemetry_sdk-1.21.0.tar.gz", hash = "sha256:3ec8cd3020328d6bc5c9991ccaf9ae820ccb6395a5648d9a95d3ec88275b8879"}, + {file = "opentelemetry_sdk-1.28.1-py3-none-any.whl", hash = "sha256:72aad7f5fcbe37113c4ab4899f6cdeb6ac77ed3e62f25a85e3627b12583dad0f"}, + {file = "opentelemetry_sdk-1.28.1.tar.gz", hash = "sha256:100fa371b2046ffba6a340c18f0b2a0463acad7461e5177e126693b613a6ca57"}, ] [package.dependencies] -opentelemetry-api = "1.21.0" -opentelemetry-semantic-conventions = "0.42b0" +opentelemetry-api = "1.28.1" +opentelemetry-semantic-conventions = "0.49b1" typing-extensions = 
">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.42b0" +version = "0.49b1" description = "OpenTelemetry Semantic Conventions" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, - {file = "opentelemetry_semantic_conventions-0.42b0.tar.gz", hash = "sha256:44ae67a0a3252a05072877857e5cc1242c98d4cf12870159f1a94bec800d38ec"}, + {file = "opentelemetry_semantic_conventions-0.49b1-py3-none-any.whl", hash = "sha256:dd6f3ac8169d2198c752e1a63f827e5f5e110ae9b0ce33f2aad9a3baf0739743"}, + {file = "opentelemetry_semantic_conventions-0.49b1.tar.gz", hash = "sha256:91817883b159ffb94c2ca9548509c4fe0aafce7c24f437aa6ac3fc613aa9a758"}, ] +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.28.1" + [[package]] -name = "opentracing" -version = "2.4.0" -description = "OpenTracing API for Python. See documentation at http://opentracing.io" -optional = true +name = "opentelemetry-util-http" +version = "0.49b1" +description = "Web util for OpenTelemetry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_util_http-0.49b1-py3-none-any.whl", hash = "sha256:0290b942f7888b6310df6803e52e12f4043b8f224db0659f62dc7b70059eb94f"}, + {file = "opentelemetry_util_http-0.49b1.tar.gz", hash = "sha256:6c2bc6f7e20e286dbdfcccb9d895fa290ec9d7c596cdf2e06bf1d8e434b2edd0"}, +] + +[[package]] +name = "orderedmultidict" +version = "1.0.1" +description = "Ordered Multivalue Dictionary" +optional = false python-versions = "*" files = [ - {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, + {file = "orderedmultidict-1.0.1-py2.py3-none-any.whl", hash = "sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3"}, + {file = "orderedmultidict-1.0.1.tar.gz", hash = "sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad"}, ] -[package.extras] -tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"] +[package.dependencies] +six = ">=1.8.0" [[package]] name = "overrides" @@ -2767,13 +2739,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -2797,40 +2769,53 @@ doc = ["mkdocs-material"] [[package]] name = "pandas" -version = "2.2.2" +version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" files = [ - {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, - {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, - {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, - {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, - {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"}, - {file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"}, - {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", 
hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, ] [package.dependencies] @@ -2903,13 +2888,13 @@ files = [ [[package]] name = "pdoc" -version = "14.6.0" +version = "14.7.0" description = "API Documentation for Python Projects" optional = false 
python-versions = ">=3.8" files = [ - {file = "pdoc-14.6.0-py3-none-any.whl", hash = "sha256:36c42c546a317d8e3e8c0b39645f24161374de0c7066ccaae76628d721e49ba5"}, - {file = "pdoc-14.6.0.tar.gz", hash = "sha256:6e98a24c5e0ca5d188397969cf82581836eaef13f172fc3820047bfe15c61c9a"}, + {file = "pdoc-14.7.0-py3-none-any.whl", hash = "sha256:72377a907efc6b2c5b3c56b717ef34f11d93621dced3b663f3aede0b844c0ad2"}, + {file = "pdoc-14.7.0.tar.gz", hash = "sha256:2d28af9c0acc39180744ad0543e4bbc3223ecba0d1302db315ec521c51f71f93"}, ] [package.dependencies] @@ -2936,19 +2921,19 @@ ptyprocess = ">=0.5" [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" @@ -2985,13 +2970,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prometheus-client" -version = "0.20.0" +version = "0.21.0" description = "Python client for the Prometheus monitoring system." 
optional = false python-versions = ">=3.8" files = [ - {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"}, - {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"}, + {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, + {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, ] [package.extras] @@ -2999,13 +2984,13 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.47" +version = "3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, ] [package.dependencies] @@ -3013,13 +2998,13 @@ wcwidth = "*" [[package]] name = "proto-plus" -version = "1.24.0" +version = "1.25.0" description = "Beautiful, Pythonic protocol buffers." optional = false python-versions = ">=3.7" files = [ - {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, - {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, + {file = "proto_plus-1.25.0-py3-none-any.whl", hash = "sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961"}, + {file = "proto_plus-1.25.0.tar.gz", hash = "sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91"}, ] [package.dependencies] @@ -3030,22 +3015,22 @@ testing = ["google-api-core (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.4" +version = "5.28.3" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, - {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, - {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, - {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, - {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, - {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, - {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = 
"sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, - {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, - {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, + {file = "protobuf-5.28.3-cp310-abi3-win32.whl", hash = "sha256:0c4eec6f987338617072592b97943fdbe30d019c56126493111cf24344c1cc24"}, + {file = "protobuf-5.28.3-cp310-abi3-win_amd64.whl", hash = "sha256:91fba8f445723fcf400fdbe9ca796b19d3b1242cd873907979b9ed71e4afe868"}, + {file = "protobuf-5.28.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a3f6857551e53ce35e60b403b8a27b0295f7d6eb63d10484f12bc6879c715687"}, + {file = "protobuf-5.28.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:3fa2de6b8b29d12c61911505d893afe7320ce7ccba4df913e2971461fa36d584"}, + {file = "protobuf-5.28.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:712319fbdddb46f21abb66cd33cb9e491a5763b2febd8f228251add221981135"}, + {file = "protobuf-5.28.3-cp38-cp38-win32.whl", hash = "sha256:3e6101d095dfd119513cde7259aa703d16c6bbdfae2554dfe5cfdbe94e32d548"}, + {file = "protobuf-5.28.3-cp38-cp38-win_amd64.whl", hash = "sha256:27b246b3723692bf1068d5734ddaf2fccc2cdd6e0c9b47fe099244d80200593b"}, + {file = "protobuf-5.28.3-cp39-cp39-win32.whl", hash = "sha256:135658402f71bbd49500322c0f736145731b16fc79dc8f367ab544a17eab4535"}, + {file = "protobuf-5.28.3-cp39-cp39-win_amd64.whl", hash = "sha256:70585a70fc2dd4818c51287ceef5bdba6387f88a578c86d47bb34669b5552c36"}, + {file = "protobuf-5.28.3-py3-none-any.whl", hash = "sha256:cee1757663fa32a1ee673434fcf3bf24dd54763c79690201208bafec62f19eed"}, + {file = "protobuf-5.28.3.tar.gz", hash = "sha256:64badbc49180a5e401f373f9ce7ab1d18b63f7dd4a9cdc43c92b9f0b481cef7b"}, ] [[package]] @@ -3101,37 +3086,26 @@ files = [ [package.extras] tests = ["pytest"] -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] - [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] name = "pyasn1-modules" -version = "0.4.0" +version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, + {file = 
"pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] [package.dependencies] @@ -3161,54 +3135,54 @@ files = [ [[package]] name = "pydantic" -version = "1.10.18" +version = "1.10.19" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e405ffcc1254d76bb0e760db101ee8916b620893e6edfbfee563b3c6f7a67c02"}, - {file = "pydantic-1.10.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e306e280ebebc65040034bff1a0a81fd86b2f4f05daac0131f29541cafd80b80"}, - {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11d9d9b87b50338b1b7de4ebf34fd29fdb0d219dc07ade29effc74d3d2609c62"}, - {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b661ce52c7b5e5f600c0c3c5839e71918346af2ef20062705ae76b5c16914cab"}, - {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c20f682defc9ef81cd7eaa485879ab29a86a0ba58acf669a78ed868e72bb89e0"}, - {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c5ae6b7c8483b1e0bf59e5f1843e4fd8fd405e11df7de217ee65b98eb5462861"}, - {file = "pydantic-1.10.18-cp310-cp310-win_amd64.whl", hash = "sha256:74fe19dda960b193b0eb82c1f4d2c8e5e26918d9cda858cbf3f41dd28549cb70"}, - {file = "pydantic-1.10.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72fa46abace0a7743cc697dbb830a41ee84c9db8456e8d77a46d79b537efd7ec"}, - {file = "pydantic-1.10.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ef0fe7ad7cbdb5f372463d42e6ed4ca9c443a52ce544472d8842a0576d830da5"}, - {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a00e63104346145389b8e8f500bc6a241e729feaf0559b88b8aa513dd2065481"}, - {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae6fa2008e1443c46b7b3a5eb03800121868d5ab6bc7cda20b5df3e133cde8b3"}, - {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9f463abafdc92635da4b38807f5b9972276be7c8c5121989768549fceb8d2588"}, - {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3445426da503c7e40baccefb2b2989a0c5ce6b163679dd75f55493b460f05a8f"}, - {file = "pydantic-1.10.18-cp311-cp311-win_amd64.whl", hash = "sha256:467a14ee2183bc9c902579bb2f04c3d3dac00eff52e252850509a562255b2a33"}, - {file = "pydantic-1.10.18-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:efbc8a7f9cb5fe26122acba1852d8dcd1e125e723727c59dcd244da7bdaa54f2"}, - {file = "pydantic-1.10.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24a4a159d0f7a8e26bf6463b0d3d60871d6a52eac5bb6a07a7df85c806f4c048"}, - {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b74be007703547dc52e3c37344d130a7bfacca7df112a9e5ceeb840a9ce195c7"}, - {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcb20d4cb355195c75000a49bb4a31d75e4295200df620f454bbc6bdf60ca890"}, - {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:46f379b8cb8a3585e3f61bf9ae7d606c70d133943f339d38b76e041ec234953f"}, - {file = 
"pydantic-1.10.18-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbfbca662ed3729204090c4d09ee4beeecc1a7ecba5a159a94b5a4eb24e3759a"}, - {file = "pydantic-1.10.18-cp312-cp312-win_amd64.whl", hash = "sha256:c6d0a9f9eccaf7f438671a64acf654ef0d045466e63f9f68a579e2383b63f357"}, - {file = "pydantic-1.10.18-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3d5492dbf953d7d849751917e3b2433fb26010d977aa7a0765c37425a4026ff1"}, - {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe734914977eed33033b70bfc097e1baaffb589517863955430bf2e0846ac30f"}, - {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15fdbe568beaca9aacfccd5ceadfb5f1a235087a127e8af5e48df9d8a45ae85c"}, - {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c3e742f62198c9eb9201781fbebe64533a3bbf6a76a91b8d438d62b813079dbc"}, - {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:19a3bd00b9dafc2cd7250d94d5b578edf7a0bd7daf102617153ff9a8fa37871c"}, - {file = "pydantic-1.10.18-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce3fcf75b2bae99aa31bd4968de0474ebe8c8258a0110903478bd83dfee4e3b"}, - {file = "pydantic-1.10.18-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:335a32d72c51a313b33fa3a9b0fe283503272ef6467910338e123f90925f0f03"}, - {file = "pydantic-1.10.18-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:34a3613c7edb8c6fa578e58e9abe3c0f5e7430e0fc34a65a415a1683b9c32d9a"}, - {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9ee4e6ca1d9616797fa2e9c0bfb8815912c7d67aca96f77428e316741082a1b"}, - {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23e8ec1ce4e57b4f441fc91e3c12adba023fedd06868445a5b5f1d48f0ab3682"}, - {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:44ae8a3e35a54d2e8fa88ed65e1b08967a9ef8c320819a969bfa09ce5528fafe"}, - {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5389eb3b48a72da28c6e061a247ab224381435256eb541e175798483368fdd3"}, - {file = "pydantic-1.10.18-cp38-cp38-win_amd64.whl", hash = "sha256:069b9c9fc645474d5ea3653788b544a9e0ccd3dca3ad8c900c4c6eac844b4620"}, - {file = "pydantic-1.10.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80b982d42515632eb51f60fa1d217dfe0729f008e81a82d1544cc392e0a50ddf"}, - {file = "pydantic-1.10.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:aad8771ec8dbf9139b01b56f66386537c6fe4e76c8f7a47c10261b69ad25c2c9"}, - {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941a2eb0a1509bd7f31e355912eb33b698eb0051730b2eaf9e70e2e1589cae1d"}, - {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65f7361a09b07915a98efd17fdec23103307a54db2000bb92095457ca758d485"}, - {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6951f3f47cb5ca4da536ab161ac0163cab31417d20c54c6de5ddcab8bc813c3f"}, - {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a4c5eec138a9b52c67f664c7d51d4c7234c5ad65dd8aacd919fb47445a62c86"}, - {file = "pydantic-1.10.18-cp39-cp39-win_amd64.whl", hash = "sha256:49e26c51ca854286bffc22b69787a8d4063a62bf7d83dc21d44d2ff426108518"}, - {file = "pydantic-1.10.18-py3-none-any.whl", hash = "sha256:06a189b81ffc52746ec9c8c007f16e5167c8b0a696e1a726369327e3db7b2a82"}, - 
{file = "pydantic-1.10.18.tar.gz", hash = "sha256:baebdff1907d1d96a139c25136a9bb7d17e118f133a76a2ef3b845e831e3403a"}, + {file = "pydantic-1.10.19-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a415b9e95fa602b10808113967f72b2da8722061265d6af69268c111c254832d"}, + {file = "pydantic-1.10.19-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:11965f421f7eb026439d4eb7464e9182fe6d69c3d4d416e464a4485d1ba61ab6"}, + {file = "pydantic-1.10.19-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5bb81fcfc6d5bff62cd786cbd87480a11d23f16d5376ad2e057c02b3b44df96"}, + {file = "pydantic-1.10.19-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83ee8c9916689f8e6e7d90161e6663ac876be2efd32f61fdcfa3a15e87d4e413"}, + {file = "pydantic-1.10.19-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0399094464ae7f28482de22383e667625e38e1516d6b213176df1acdd0c477ea"}, + {file = "pydantic-1.10.19-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b2cf5e26da84f2d2dee3f60a3f1782adedcee785567a19b68d0af7e1534bd1f"}, + {file = "pydantic-1.10.19-cp310-cp310-win_amd64.whl", hash = "sha256:1fc8cc264afaf47ae6a9bcbd36c018d0c6b89293835d7fb0e5e1a95898062d59"}, + {file = "pydantic-1.10.19-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d7a8a1dd68bac29f08f0a3147de1885f4dccec35d4ea926e6e637fac03cdb4b3"}, + {file = "pydantic-1.10.19-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07d00ca5ef0de65dd274005433ce2bb623730271d495a7d190a91c19c5679d34"}, + {file = "pydantic-1.10.19-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad57004e5d73aee36f1e25e4e73a4bc853b473a1c30f652dc8d86b0a987ffce3"}, + {file = "pydantic-1.10.19-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dce355fe7ae53e3090f7f5fa242423c3a7b53260747aa398b4b3aaf8b25f41c3"}, + {file = "pydantic-1.10.19-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0d32227ea9a3bf537a2273fd2fdb6d64ab4d9b83acd9e4e09310a777baaabb98"}, + {file = "pydantic-1.10.19-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e351df83d1c9cffa53d4e779009a093be70f1d5c6bb7068584086f6a19042526"}, + {file = "pydantic-1.10.19-cp311-cp311-win_amd64.whl", hash = "sha256:d8d72553d2f3f57ce547de4fa7dc8e3859927784ab2c88343f1fc1360ff17a08"}, + {file = "pydantic-1.10.19-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d5b5b7c6bafaef90cbb7dafcb225b763edd71d9e22489647ee7df49d6d341890"}, + {file = "pydantic-1.10.19-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:570ad0aeaf98b5e33ff41af75aba2ef6604ee25ce0431ecd734a28e74a208555"}, + {file = "pydantic-1.10.19-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0890fbd7fec9e151c7512941243d830b2d6076d5df159a2030952d480ab80a4e"}, + {file = "pydantic-1.10.19-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec5c44e6e9eac5128a9bfd21610df3b8c6b17343285cc185105686888dc81206"}, + {file = "pydantic-1.10.19-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6eb56074b11a696e0b66c7181da682e88c00e5cebe6570af8013fcae5e63e186"}, + {file = "pydantic-1.10.19-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9d7d48fbc5289efd23982a0d68e973a1f37d49064ccd36d86de4543aff21e086"}, + {file = "pydantic-1.10.19-cp312-cp312-win_amd64.whl", hash = "sha256:fd34012691fbd4e67bdf4accb1f0682342101015b78327eaae3543583fcd451e"}, + {file = "pydantic-1.10.19-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:4a5d5b877c7d3d9e17399571a8ab042081d22fe6904416a8b20f8af5909e6c8f"}, + {file = "pydantic-1.10.19-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c46f58ef2df958ed2ea7437a8be0897d5efe9ee480818405338c7da88186fb3"}, + {file = "pydantic-1.10.19-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d8a38a44bb6a15810084316ed69c854a7c06e0c99c5429f1d664ad52cec353c"}, + {file = "pydantic-1.10.19-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a82746c6d6e91ca17e75f7f333ed41d70fce93af520a8437821dec3ee52dfb10"}, + {file = "pydantic-1.10.19-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:566bebdbe6bc0ac593fa0f67d62febbad9f8be5433f686dc56401ba4aab034e3"}, + {file = "pydantic-1.10.19-cp37-cp37m-win_amd64.whl", hash = "sha256:22a1794e01591884741be56c6fba157c4e99dcc9244beb5a87bd4aa54b84ea8b"}, + {file = "pydantic-1.10.19-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:076c49e24b73d346c45f9282d00dbfc16eef7ae27c970583d499f11110d9e5b0"}, + {file = "pydantic-1.10.19-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d4320510682d5a6c88766b2a286d03b87bd3562bf8d78c73d63bab04b21e7b4"}, + {file = "pydantic-1.10.19-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e66aa0fa7f8aa9d0a620361834f6eb60d01d3e9cea23ca1a92cda99e6f61dac"}, + {file = "pydantic-1.10.19-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d216f8d0484d88ab72ab45d699ac669fe031275e3fa6553e3804e69485449fa0"}, + {file = "pydantic-1.10.19-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9f28a81978e936136c44e6a70c65bde7548d87f3807260f73aeffbf76fb94c2f"}, + {file = "pydantic-1.10.19-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d3449633c207ec3d2d672eedb3edbe753e29bd4e22d2e42a37a2c1406564c20f"}, + {file = "pydantic-1.10.19-cp38-cp38-win_amd64.whl", hash = "sha256:7ea24e8614f541d69ea72759ff635df0e612b7dc9d264d43f51364df310081a3"}, + {file = "pydantic-1.10.19-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:573254d844f3e64093f72fcd922561d9c5696821ff0900a0db989d8c06ab0c25"}, + {file = "pydantic-1.10.19-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff09600cebe957ecbb4a27496fe34c1d449e7957ed20a202d5029a71a8af2e35"}, + {file = "pydantic-1.10.19-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4739c206bfb6bb2bdc78dcd40bfcebb2361add4ceac6d170e741bb914e9eff0f"}, + {file = "pydantic-1.10.19-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bfb5b378b78229119d66ced6adac2e933c67a0aa1d0a7adffbe432f3ec14ce4"}, + {file = "pydantic-1.10.19-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7f31742c95e3f9443b8c6fa07c119623e61d76603be9c0d390bcf7e888acabcb"}, + {file = "pydantic-1.10.19-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6444368b651a14c2ce2fb22145e1496f7ab23cbdb978590d47c8d34a7bc0289"}, + {file = "pydantic-1.10.19-cp39-cp39-win_amd64.whl", hash = "sha256:945407f4d08cd12485757a281fca0e5b41408606228612f421aa4ea1b63a095d"}, + {file = "pydantic-1.10.19-py3-none-any.whl", hash = "sha256:2206a1752d9fac011e95ca83926a269fb0ef5536f7e053966d058316e24d929f"}, + {file = "pydantic-1.10.19.tar.gz", hash = "sha256:fea36c2065b7a1d28c6819cc2e93387b43dd5d3cf5a1e82d8132ee23f36d1f10"}, ] [package.dependencies] @@ -3324,13 +3298,13 @@ test = ["flaky", "pretend", "pytest (>=3.0.1)"] [[package]] name = "pyparsing" -version = "3.1.4" +version = "3.2.0" description = "pyparsing module - Classes and methods to define and 
execute parsing grammars" optional = false -python-versions = ">=3.6.8" +python-versions = ">=3.9" files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, + {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"}, + {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"}, ] [package.extras] @@ -3338,13 +3312,13 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "8.3.2" +version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, ] [package.dependencies] @@ -3487,51 +3461,55 @@ files = [ [[package]] name = "pytz" -version = "2024.1" +version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, ] [[package]] name = "pywin32" -version = "306" +version = "308" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = 
"sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, ] [[package]] name = "pywinpty" -version = "2.0.13" +version = "2.0.14" description = "Pseudo terminal support for Windows from Python." 
optional = false python-versions = ">=3.8" files = [ - {file = "pywinpty-2.0.13-cp310-none-win_amd64.whl", hash = "sha256:697bff211fb5a6508fee2dc6ff174ce03f34a9a233df9d8b5fe9c8ce4d5eaf56"}, - {file = "pywinpty-2.0.13-cp311-none-win_amd64.whl", hash = "sha256:b96fb14698db1284db84ca38c79f15b4cfdc3172065b5137383910567591fa99"}, - {file = "pywinpty-2.0.13-cp312-none-win_amd64.whl", hash = "sha256:2fd876b82ca750bb1333236ce98488c1be96b08f4f7647cfdf4129dfad83c2d4"}, - {file = "pywinpty-2.0.13-cp38-none-win_amd64.whl", hash = "sha256:61d420c2116c0212808d31625611b51caf621fe67f8a6377e2e8b617ea1c1f7d"}, - {file = "pywinpty-2.0.13-cp39-none-win_amd64.whl", hash = "sha256:71cb613a9ee24174730ac7ae439fd179ca34ccb8c5349e8d7b72ab5dea2c6f4b"}, - {file = "pywinpty-2.0.13.tar.gz", hash = "sha256:c34e32351a3313ddd0d7da23d27f835c860d32fe4ac814d372a3ea9594f41dde"}, + {file = "pywinpty-2.0.14-cp310-none-win_amd64.whl", hash = "sha256:0b149c2918c7974f575ba79f5a4aad58bd859a52fa9eb1296cc22aa412aa411f"}, + {file = "pywinpty-2.0.14-cp311-none-win_amd64.whl", hash = "sha256:cf2a43ac7065b3e0dc8510f8c1f13a75fb8fde805efa3b8cff7599a1ef497bc7"}, + {file = "pywinpty-2.0.14-cp312-none-win_amd64.whl", hash = "sha256:55dad362ef3e9408ade68fd173e4f9032b3ce08f68cfe7eacb2c263ea1179737"}, + {file = "pywinpty-2.0.14-cp313-none-win_amd64.whl", hash = "sha256:074fb988a56ec79ca90ed03a896d40707131897cefb8f76f926e3834227f2819"}, + {file = "pywinpty-2.0.14-cp39-none-win_amd64.whl", hash = "sha256:5725fd56f73c0531ec218663bd8c8ff5acc43c78962fab28564871b5fce053fd"}, + {file = "pywinpty-2.0.14.tar.gz", hash = "sha256:18bd9529e4a5daf2d9719aa17788ba6013e594ae94c5a0c27e83df3278b0660e"}, ] [[package]] @@ -3755,90 +3733,105 @@ rpds-py = ">=0.7.0" [[package]] name = "regex" -version = "2024.7.24" +version = "2024.11.6" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"}, - {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"}, - {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"}, - {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"}, - {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"}, - {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"}, - {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"}, - {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"}, - {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"}, - {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"}, - {file = 
"regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"}, - {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"}, - {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"}, - {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"}, - {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"}, - {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"}, - {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"}, - {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"}, - {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"}, - {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"}, - {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"}, - {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"}, - {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash 
= "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] [[package]] @@ -3862,6 +3855,24 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "requests-oauth2client" +version = "1.6.0" +description = "An OAuth2.x client based on `requests`." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests_oauth2client-1.6.0-py3-none-any.whl", hash = "sha256:fa702619409cc93ab6433871d1ccec58140a70d86923fd742983fac47b334881"}, + {file = "requests_oauth2client-1.6.0.tar.gz", hash = "sha256:53f4a82b566d21707ecd7e4b8cdab019e6eb0965f26c4ac0484b9e28ffd221ee"}, +] + +[package.dependencies] +attrs = ">=23.2.0" +binapy = ">=0.8" +furl = ">=2.1.2" +jwskate = ">=0.11.1" +requests = ">=2.19.0" + [[package]] name = "requests-oauthlib" version = "2.0.0" @@ -3907,114 +3918,101 @@ files = [ [[package]] name = "rpds-py" -version = "0.20.0" +version = "0.21.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, - {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"}, - {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"}, - {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"}, - {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"}, - {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"}, - {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"}, - {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"}, - {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"}, - {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"}, - {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"}, - {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"}, - {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"}, - {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"}, - {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"}, - {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"}, - {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"}, - {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"}, - {file = 
"rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"}, - {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"}, - {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"}, - {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"}, - {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"}, - {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"}, - {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, + {file = "rpds_py-0.21.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590"}, + {file = "rpds_py-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad116dda078d0bc4886cb7840e19811562acdc7a8e296ea6ec37e70326c1b41c"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:808f1ac7cf3b44f81c9475475ceb221f982ef548e44e024ad5f9e7060649540e"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de552f4a1916e520f2703ec474d2b4d3f86d41f353e7680b597512ffe7eac5d0"}, 
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efec946f331349dfc4ae9d0e034c263ddde19414fe5128580f512619abed05f1"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b80b4690bbff51a034bfde9c9f6bf9357f0a8c61f548942b80f7b66356508bf5"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:085ed25baac88953d4283e5b5bd094b155075bb40d07c29c4f073e10623f9f2e"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:daa8efac2a1273eed2354397a51216ae1e198ecbce9036fba4e7610b308b6153"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:95a5bad1ac8a5c77b4e658671642e4af3707f095d2b78a1fdd08af0dfb647624"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3e53861b29a13d5b70116ea4230b5f0f3547b2c222c5daa090eb7c9c82d7f664"}, + {file = "rpds_py-0.21.0-cp310-none-win32.whl", hash = "sha256:ea3a6ac4d74820c98fcc9da4a57847ad2cc36475a8bd9683f32ab6d47a2bd682"}, + {file = "rpds_py-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:b8f107395f2f1d151181880b69a2869c69e87ec079c49c0016ab96860b6acbe5"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5555db3e618a77034954b9dc547eae94166391a98eb867905ec8fcbce1308d95"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97ef67d9bbc3e15584c2f3c74bcf064af36336c10d2e21a2131e123ce0f924c9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ab2c2a26d2f69cdf833174f4d9d86118edc781ad9a8fa13970b527bf8236027"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4e8921a259f54bfbc755c5bbd60c82bb2339ae0324163f32868f63f0ebb873d9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a7ff941004d74d55a47f916afc38494bd1cfd4b53c482b77c03147c91ac0ac3"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5145282a7cd2ac16ea0dc46b82167754d5e103a05614b724457cffe614f25bd8"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de609a6f1b682f70bb7163da745ee815d8f230d97276db049ab447767466a09d"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40c91c6e34cf016fa8e6b59d75e3dbe354830777fcfd74c58b279dceb7975b75"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d2132377f9deef0c4db89e65e8bb28644ff75a18df5293e132a8d67748397b9f"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0a9e0759e7be10109645a9fddaaad0619d58c9bf30a3f248a2ea57a7c417173a"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e20da3957bdf7824afdd4b6eeb29510e83e026473e04952dca565170cd1ecc8"}, + {file = "rpds_py-0.21.0-cp311-none-win32.whl", hash = "sha256:f71009b0d5e94c0e86533c0b27ed7cacc1239cb51c178fd239c3cfefefb0400a"}, + {file = "rpds_py-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:e168afe6bf6ab7ab46c8c375606298784ecbe3ba31c0980b7dcbb9631dcba97e"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:30b912c965b2aa76ba5168fd610087bad7fcde47f0a8367ee8f1876086ee6d1d"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca9989d5d9b1b300bc18e1801c67b9f6d2c66b8fd9621b36072ed1df2c977f72"}, + {file = 
"rpds_py-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f54e7106f0001244a5f4cf810ba8d3f9c542e2730821b16e969d6887b664266"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fed5dfefdf384d6fe975cc026886aece4f292feaf69d0eeb716cfd3c5a4dd8be"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590ef88db231c9c1eece44dcfefd7515d8bf0d986d64d0caf06a81998a9e8cab"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f983e4c2f603c95dde63df633eec42955508eefd8d0f0e6d236d31a044c882d7"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b229ce052ddf1a01c67d68166c19cb004fb3612424921b81c46e7ea7ccf7c3bf"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ebf64e281a06c904a7636781d2e973d1f0926a5b8b480ac658dc0f556e7779f4"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:998a8080c4495e4f72132f3d66ff91f5997d799e86cec6ee05342f8f3cda7dca"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:98486337f7b4f3c324ab402e83453e25bb844f44418c066623db88e4c56b7c7b"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a78d8b634c9df7f8d175451cfeac3810a702ccb85f98ec95797fa98b942cea11"}, + {file = "rpds_py-0.21.0-cp312-none-win32.whl", hash = "sha256:a58ce66847711c4aa2ecfcfaff04cb0327f907fead8945ffc47d9407f41ff952"}, + {file = "rpds_py-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:e860f065cc4ea6f256d6f411aba4b1251255366e48e972f8a347cf88077b24fd"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ee4eafd77cc98d355a0d02f263efc0d3ae3ce4a7c24740010a8b4012bbb24937"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:688c93b77e468d72579351a84b95f976bd7b3e84aa6686be6497045ba84be560"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c38dbf31c57032667dd5a2f0568ccde66e868e8f78d5a0d27dcc56d70f3fcd3b"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d6129137f43f7fa02d41542ffff4871d4aefa724a5fe38e2c31a4e0fd343fb0"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520ed8b99b0bf86a176271f6fe23024323862ac674b1ce5b02a72bfeff3fff44"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaeb25ccfb9b9014a10eaf70904ebf3f79faaa8e60e99e19eef9f478651b9b74"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af04ac89c738e0f0f1b913918024c3eab6e3ace989518ea838807177d38a2e94"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9b76e2afd585803c53c5b29e992ecd183f68285b62fe2668383a18e74abe7a3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5afb5efde74c54724e1a01118c6e5c15e54e642c42a1ba588ab1f03544ac8c7a"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:52c041802a6efa625ea18027a0723676a778869481d16803481ef6cc02ea8cb3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee1e4fc267b437bb89990b2f2abf6c25765b89b72dd4a11e21934df449e0c976"}, + {file = "rpds_py-0.21.0-cp313-none-win32.whl", hash = 
"sha256:0c025820b78817db6a76413fff6866790786c38f95ea3f3d3c93dbb73b632202"}, + {file = "rpds_py-0.21.0-cp313-none-win_amd64.whl", hash = "sha256:320c808df533695326610a1b6a0a6e98f033e49de55d7dc36a13c8a30cfa756e"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2c51d99c30091f72a3c5d126fad26236c3f75716b8b5e5cf8effb18889ced928"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbd7504a10b0955ea287114f003b7ad62330c9e65ba012c6223dba646f6ffd05"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dcc4949be728ede49e6244eabd04064336012b37f5c2200e8ec8eb2988b209c"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f414da5c51bf350e4b7960644617c130140423882305f7574b6cf65a3081cecb"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9afe42102b40007f588666bc7de82451e10c6788f6f70984629db193849dced1"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b929c2bb6e29ab31f12a1117c39f7e6d6450419ab7464a4ea9b0b417174f044"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8404b3717da03cbf773a1d275d01fec84ea007754ed380f63dfc24fb76ce4592"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e12bb09678f38b7597b8346983d2323a6482dcd59e423d9448108c1be37cac9d"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58a0e345be4b18e6b8501d3b0aa540dad90caeed814c515e5206bb2ec26736fd"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c3761f62fcfccf0864cc4665b6e7c3f0c626f0380b41b8bd1ce322103fa3ef87"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c2b2f71c6ad6c2e4fc9ed9401080badd1469fa9889657ec3abea42a3d6b2e1ed"}, + {file = "rpds_py-0.21.0-cp39-none-win32.whl", hash = "sha256:b21747f79f360e790525e6f6438c7569ddbfb1b3197b9e65043f25c3c9b489d8"}, + {file = "rpds_py-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:0626238a43152918f9e72ede9a3b6ccc9e299adc8ade0d67c5e142d564c9a83d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6b4ef7725386dc0762857097f6b7266a6cdd62bfd209664da6712cb26acef035"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6bc0e697d4d79ab1aacbf20ee5f0df80359ecf55db33ff41481cf3e24f206919"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da52d62a96e61c1c444f3998c434e8b263c384f6d68aca8274d2e08d1906325c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:98e4fe5db40db87ce1c65031463a760ec7906ab230ad2249b4572c2fc3ef1f9f"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30bdc973f10d28e0337f71d202ff29345320f8bc49a31c90e6c257e1ccef4333"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:faa5e8496c530f9c71f2b4e1c49758b06e5f4055e17144906245c99fa6d45356"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32eb88c30b6a4f0605508023b7141d043a79b14acb3b969aa0b4f99b25bc7d4a"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:a89a8ce9e4e75aeb7fa5d8ad0f3fecdee813802592f4f46a15754dcb2fd6b061"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:241e6c125568493f553c3d0fdbb38c74babf54b45cef86439d4cd97ff8feb34d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:3b766a9f57663396e4f34f5140b3595b233a7b146e94777b97a8413a1da1be18"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:af4a644bf890f56e41e74be7d34e9511e4954894d544ec6b8efe1e21a1a8da6c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3e30a69a706e8ea20444b98a49f386c17b26f860aa9245329bab0851ed100677"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:031819f906bb146561af051c7cef4ba2003d28cff07efacef59da973ff7969ba"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b876f2bc27ab5954e2fd88890c071bd0ed18b9c50f6ec3de3c50a5ece612f7a6"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc5695c321e518d9f03b7ea6abb5ea3af4567766f9852ad1560f501b17588c7b"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b4de1da871b5c0fd5537b26a6fc6814c3cc05cabe0c941db6e9044ffbb12f04a"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:878f6fea96621fda5303a2867887686d7a198d9e0f8a40be100a63f5d60c88c9"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8eeec67590e94189f434c6d11c426892e396ae59e4801d17a93ac96b8c02a6c"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff2eba7f6c0cb523d7e9cff0903f2fe1feff8f0b2ceb6bd71c0e20a4dcee271"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a429b99337062877d7875e4ff1a51fe788424d522bd64a8c0a20ef3021fdb6ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d167e4dbbdac48bd58893c7e446684ad5d425b407f9336e04ab52e8b9194e2ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eb2de8a147ffe0626bfdc275fc6563aa7bf4b6db59cf0d44f0ccd6ca625a24e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e78868e98f34f34a88e23ee9ccaeeec460e4eaf6db16d51d7a9b883e5e785a5e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4991ca61656e3160cdaca4851151fd3f4a92e9eba5c7a530ab030d6aee96ec89"}, + {file = "rpds_py-0.21.0.tar.gz", hash = "sha256:ed6378c9d66d0de903763e7706383d60c33829581f0adff47b6535f1802fa6db"}, ] [[package]] @@ -4051,93 +4049,54 @@ jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] [[package]] name = "ruamel-yaml-clib" -version = "0.2.8" +version = "0.2.12" description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" optional = false -python-versions = ">=3.6" -files = [ - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, - {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, - {file = 
"ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, - {file = 
"ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, - {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, -] - -[[package]] -name = "schematic-db" -version = "0.0.41" -description = "" -optional = false -python-versions = ">=3.9,<4.0" +python-versions = ">=3.9" files = [ - {file = "schematic_db-0.0.41-py3-none-any.whl", hash = "sha256:bf8e8a73fb06113431a89a25df15f3eefbe7b40c2cfe149c4e9afa6e6b33fd5b"}, - {file = "schematic_db-0.0.41.tar.gz", hash = "sha256:cd5ec936cdb4fca203de57aa0c771b2b251c5eec7e0af719c388cad70d8d9f6d"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"}, + {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"}, + {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = 
"sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"}, + {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"}, + {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"}, + {file = 
"ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"}, + {file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"}, ] -[package.dependencies] -deprecation = ">=2.1.0,<3.0.0" -interrogate = ">=1.5.0,<2.0.0" -networkx = ">=2.8.6,<3.0.0" -pandas = ">=2.0.0,<3.0.0" -pydantic = ">=1.10.7,<2.0.0" -PyYAML = ">=6.0,<7.0" -requests = ">=2.28.1,<3.0.0" -SQLAlchemy = ">=2.0.19,<3.0.0" -SQLAlchemy-Utils = ">=0.41.1,<0.42.0" -synapseclient = {version = ">=4.0.0,<5.0.0", optional = true, markers = "extra == \"synapse\""} -tenacity = ">=8.1.0,<9.0.0" -validators = ">=0.20.0,<0.21.0" - -[package.extras] -mysql = ["mysqlclient (>=2.1.1,<3.0.0)"] -postgres = ["psycopg2-binary (>=2.9.5,<3.0.0)"] -synapse = ["synapseclient (>=4.0.0,<5.0.0)"] - [[package]] name = "scipy" version = "1.13.1" @@ -4258,27 +4217,27 @@ files = [ [[package]] name = "sphinx" -version = "7.4.7" +version = "7.3.7" description = "Python documentation generator" optional = false python-versions = ">=3.9" files = [ - {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, - {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, + {file = "sphinx-7.3.7-py3-none-any.whl", hash = "sha256:413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3"}, + {file = "sphinx-7.3.7.tar.gz", hash = "sha256:a4a7db75ed37531c05002d56ed6948d4c42f473a36f46e1382b0bd76ca9627bc"}, ] [package.dependencies] alabaster = ">=0.7.14,<0.8.0" -babel = ">=2.13" -colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} -docutils = ">=0.20,<0.22" +babel = ">=2.9" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +docutils = ">=0.18.1,<0.22" imagesize = ">=1.3" -importlib-metadata = {version = ">=6.0", markers = "python_version < \"3.10\""} -Jinja2 = ">=3.1" -packaging = ">=23.0" -Pygments = ">=2.17" -requests = ">=2.30.0" -snowballstemmer = ">=2.2" +importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} +Jinja2 = ">=3.0" +packaging = ">=21.0" +Pygments = ">=2.14" +requests = ">=2.25.0" +snowballstemmer = ">=2.0" sphinxcontrib-applehelp = "*" sphinxcontrib-devhelp = "*" sphinxcontrib-htmlhelp = ">=2.0.0" @@ -4289,8 +4248,8 @@ tomli = {version = ">=2", markers = "python_version < \"3.11\""} [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"] -test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] +lint = ["flake8 (>=3.5.0)", "importlib_metadata", "mypy (==1.9.0)", "pytest (>=6.0)", "ruff (==0.3.7)", "sphinx-lint", "tomli", "types-docutils", "types-requests"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=6.0)", "setuptools (>=67.0)"] [[package]] name = "sphinx-click" @@ -4402,121 +4361,6 @@ lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] standalone = ["Sphinx (>=5)"] test = ["pytest"] -[[package]] -name = "sqlalchemy" -version = "2.0.34" -description = "Database Abstraction Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = 
"sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = 
"sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"}, - {file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"}, - {file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"}, -] - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} -typing-extensions = ">=4.6.0" - -[package.extras] -aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] -aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=8)"] -oracle-oracledb = ["oracledb (>=1.0.1)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.29.1)"] -postgresql-psycopg = ["psycopg (>=3.0.7)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] -pymysql = ["pymysql"] -sqlcipher = ["sqlcipher3_binary"] - -[[package]] -name = "sqlalchemy-utils" -version = "0.41.2" -description = "Various utility functions for SQLAlchemy." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-Utils-0.41.2.tar.gz", hash = "sha256:bc599c8c3b3319e53ce6c5c3c471120bd325d0071fb6f38a10e924e3d07b9990"}, - {file = "SQLAlchemy_Utils-0.41.2-py3-none-any.whl", hash = "sha256:85cf3842da2bf060760f955f8467b87983fb2e30f1764fd0e24a48307dc8ec6e"}, -] - -[package.dependencies] -SQLAlchemy = ">=1.3" - -[package.extras] -arrow = ["arrow (>=0.3.4)"] -babel = ["Babel (>=1.3)"] -color = ["colour (>=0.0.4)"] -encrypted = ["cryptography (>=0.6)"] -intervals = ["intervals (>=0.7.1)"] -password = ["passlib (>=1.6,<2.0)"] -pendulum = ["pendulum (>=2.0.5)"] -phone = ["phonenumbers (>=5.9.2)"] -test = ["Jinja2 (>=2.3)", "Pygments (>=1.2)", "backports.zoneinfo", "docutils (>=0.10)", "flake8 (>=2.4.0)", "flexmock (>=0.9.7)", "isort (>=4.2.2)", "pg8000 (>=1.12.4)", "psycopg (>=3.1.8)", "psycopg2 (>=2.5.1)", "psycopg2cffi (>=2.8.1)", "pymysql", "pyodbc", "pytest (==7.4.4)", "python-dateutil (>=2.6)", "pytz (>=2014.2)"] -test-all = ["Babel (>=1.3)", "Jinja2 (>=2.3)", "Pygments (>=1.2)", "arrow (>=0.3.4)", "backports.zoneinfo", "colour (>=0.0.4)", "cryptography (>=0.6)", "docutils (>=0.10)", "flake8 (>=2.4.0)", "flexmock (>=0.9.7)", "furl (>=0.4.1)", "intervals (>=0.7.1)", "isort (>=4.2.2)", "passlib (>=1.6,<2.0)", "pendulum (>=2.0.5)", "pg8000 (>=1.12.4)", "phonenumbers (>=5.9.2)", "psycopg (>=3.1.8)", "psycopg2 (>=2.5.1)", "psycopg2cffi (>=2.8.1)", "pymysql", "pyodbc", "pytest (==7.4.4)", "python-dateutil", "python-dateutil (>=2.6)", "pytz (>=2014.2)"] -timezone = ["python-dateutil"] -url = ["furl (>=0.4.1)"] - [[package]] name = "stack-data" version = "0.6.3" @@ -4540,7 +4384,7 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "swagger-ui-bundle" version = "0.0.9" description = "swagger_ui_bundle - swagger-ui files in a pip package" -optional = true +optional = false python-versions = "*" files = [ {file = "swagger_ui_bundle-0.0.9-py3-none-any.whl", hash = "sha256:cea116ed81147c345001027325c1ddc9ca78c1ee7319935c3c75d3669279d575"}, @@ -4552,13 +4396,13 @@ Jinja2 = ">=2.0" [[package]] name = "synapseclient" -version = "4.4.1" +version = "4.6.0" description = "A client for Synapse, a collaborative, open-source research platform that allows teams to share data, track analyses, and collaborate." 
optional = false -python-versions = ">=3.8" +python-versions = "<3.13,>=3.9" files = [ - {file = "synapseclient-4.4.1-py3-none-any.whl", hash = "sha256:fe5716f234184ad0290c930f98383ce87bbf687221365ef477de826831c73994"}, - {file = "synapseclient-4.4.1.tar.gz", hash = "sha256:fc6ec5a0fd49edf2b05ecd7f69316784a4b813dd0fd259785932c0786d480629"}, + {file = "synapseclient-4.6.0-py3-none-any.whl", hash = "sha256:2da021585a6aa237a00fcca5992bc263523388c519d0b7e82525ba818bb5bd1b"}, + {file = "synapseclient-4.6.0.tar.gz", hash = "sha256:0d6561b399b9ecec71f319c3c512c845e152179237816212cc42f928d67166b8"}, ] [package.dependencies] @@ -4567,9 +4411,13 @@ asyncio-atexit = ">=1.0.1,<1.1.0" deprecated = ">=1.2.4,<2.0" httpx = ">=0.27.0,<0.28.0" nest-asyncio = ">=1.6.0,<1.7.0" -opentelemetry-api = ">=1.21.0,<1.22.0" -opentelemetry-exporter-otlp-proto-http = ">=1.21.0,<1.22.0" -opentelemetry-sdk = ">=1.21.0,<1.22.0" +opentelemetry-api = ">=1.21.0" +opentelemetry-exporter-otlp-proto-http = ">=1.21.0" +opentelemetry-instrumentation-httpx = ">=0.48b0" +opentelemetry-instrumentation-requests = ">=0.48b0" +opentelemetry-instrumentation-threading = ">=0.48b0" +opentelemetry-instrumentation-urllib = ">=0.48b0" +opentelemetry-sdk = ">=1.21.0" psutil = ">=5.9.8,<5.10.0" requests = ">=2.22.0,<3.0" tqdm = ">=4.66.2,<5.0" @@ -4583,20 +4431,6 @@ pandas = ["pandas (>=1.5,<3.0)"] pysftp = ["pysftp (>=0.2.8,<0.3)"] tests = ["flake8 (>=3.7.0,<4.0)", "func-timeout (>=4.3,<5.0)", "pandas (>=1.5,<3.0)", "pytest (>=7.0.0,<8.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-cov (>=4.1.0,<4.2.0)", "pytest-mock (>=3.0,<4.0)", "pytest-rerunfailures (>=12.0,<13.0)", "pytest-socket (>=0.6.0,<0.7.0)", "pytest-xdist[psutil] (>=2.2,<3.0.0)"] -[[package]] -name = "tabulate" -version = "0.9.0" -description = "Pretty-print tabular data" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, - {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, -] - -[package.extras] -widechars = ["wcwidth"] - [[package]] name = "tenacity" version = "8.5.0" @@ -4633,47 +4467,15 @@ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] -[[package]] -name = "threadloop" -version = "1.0.2" -description = "Tornado IOLoop Backed Concurrent Futures" -optional = true -python-versions = "*" -files = [ - {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, - {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, -] - -[package.dependencies] -tornado = "*" - -[[package]] -name = "thrift" -version = "0.20.0" -description = "Python bindings for the Apache Thrift RPC system" -optional = true -python-versions = "*" -files = [ - {file = "thrift-0.20.0.tar.gz", hash = "sha256:4dd662eadf6b8aebe8a41729527bd69adf6ceaa2a8681cbef64d1273b3e8feba"}, -] - -[package.dependencies] -six = ">=1.7.2" - -[package.extras] -all = ["tornado (>=4.0)", "twisted"] -tornado = ["tornado (>=4.0)"] -twisted = ["twisted"] - [[package]] name = "tinycss2" -version = "1.3.0" +version = "1.4.0" description = "A tiny CSS parser" optional = false python-versions = ">=3.8" files = [ - {file = "tinycss2-1.3.0-py3-none-any.whl", hash = 
"sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, - {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, + {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, + {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, ] [package.dependencies] @@ -4696,13 +4498,13 @@ files = [ [[package]] name = "tomli" -version = "2.0.1" +version = "2.1.0" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, ] [[package]] @@ -4718,13 +4520,13 @@ files = [ [[package]] name = "toolz" -version = "0.12.1" +version = "1.0.0" description = "List processing tools and functional utilities" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"}, - {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, + {file = "toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236"}, + {file = "toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02"}, ] [[package]] @@ -4749,13 +4551,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.5" +version = "4.67.0" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, + {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, + {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, ] [package.dependencies] @@ -4763,6 +4565,7 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +discord = ["requests"] notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] @@ -4784,13 +4587,13 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "types-python-dateutil" -version = "2.9.0.20240906" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, + {file = 
"types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] @@ -4821,13 +4624,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "tzdata" -version = "2024.1" +version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, - {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] [[package]] @@ -4890,39 +4693,23 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "uwsgi" -version = "2.0.26" +version = "2.0.28" description = "The uWSGI server" optional = true python-versions = "*" files = [ - {file = "uwsgi-2.0.26.tar.gz", hash = "sha256:86e6bfcd4dc20529665f5b7777193cdc48622fb2c59f0a7f1e3dc32b3882e7f9"}, + {file = "uwsgi-2.0.28.tar.gz", hash = "sha256:79ca1891ef2df14508ab0471ee8c0eb94bd2d51d03f32f90c4bbe557ab1e99d0"}, ] -[[package]] -name = "validators" -version = "0.20.0" -description = "Python Data Validation for Humans™." -optional = false -python-versions = ">=3.4" -files = [ - {file = "validators-0.20.0.tar.gz", hash = "sha256:24148ce4e64100a2d5e267233e23e7afeb55316b47d30faae7eb6e7292bc226a"}, -] - -[package.dependencies] -decorator = ">=3.4.0" - -[package.extras] -test = ["flake8 (>=2.4.0)", "isort (>=4.2.2)", "pytest (>=2.2.3)"] - [[package]] name = "virtualenv" -version = "20.26.3" +version = "20.27.1" description = "Virtual Python Environment builder" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, + {file = "virtualenv-20.27.1-py3-none-any.whl", hash = "sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4"}, + {file = "virtualenv-20.27.1.tar.gz", hash = "sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba"}, ] [package.dependencies] @@ -4947,19 +4734,15 @@ files = [ [[package]] name = "webcolors" -version = "24.8.0" +version = "24.11.1" description = "A library for working with the color formats defined by HTML and CSS." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"}, - {file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"}, + {file = "webcolors-24.11.1-py3-none-any.whl", hash = "sha256:515291393b4cdf0eb19c155749a096f779f7d909f7cceea072791cb9095b92e9"}, + {file = "webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6"}, ] -[package.extras] -docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] -tests = ["coverage[toml]"] - [[package]] name = "webencodings" version = "0.5.1" @@ -4991,7 +4774,7 @@ test = ["websockets"] name = "werkzeug" version = "2.3.8" description = "The comprehensive WSGI web application library." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "werkzeug-2.3.8-py3-none-any.whl", hash = "sha256:bba1f19f8ec89d4d607a3bd62f1904bd2e609472d93cd85e9d4e178f472c3748"}, @@ -5096,13 +4879,13 @@ files = [ [[package]] name = "zipp" -version = "3.20.1" +version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"}, - {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"}, + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] @@ -5114,10 +4897,10 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", type = ["pytest-mypy"] [extras] -api = ["Flask", "Flask-Cors", "Jinja2", "connexion", "flask-opentracing", "jaeger-client", "opentelemetry-exporter-otlp-proto-grpc", "pyopenssl"] +api = ["Jinja2", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-sdk", "pyopenssl"] aws = ["uWSGI"] [metadata] lock-version = "2.0" python-versions = ">=3.9.0,<3.11" -content-hash = "f814725d68db731c704f4ebcd169ae71e4031f0d939c3ce789145ddcb5f196eb" +content-hash = "3d585110760814bcfe58e9a6cc84226a1ed2dc16d17f5c1d0efc03f4e1500f9b" diff --git a/pyproject.toml b/pyproject.toml index f7045473c..ca2b0f5d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "schematicpy" -version = "23.11.1" +version = "24.11.1" description = "Package for biomedical data model and metadata ingress management" authors = [ "Milen Nikolov ", @@ -34,10 +34,8 @@ packages = [ [tool.poetry.scripts] schematic = "schematic.__main__:main" - [tool.poetry.dependencies] python = ">=3.9.0,<3.11" - click = "^8.0.0" click-log = "^0.4.0" google-api-python-client = "^2.0.0" @@ -54,37 +52,35 @@ pygsheets = "^2.0.4" PyYAML = "^6.0.0" rdflib = "^6.0.0" setuptools = "^66.0.0" -synapseclient = "4.4.1" +synapseclient = "4.6.0" tenacity = "^8.0.1" toml = "^0.10.2" great-expectations = "^0.15.0" -sphinx-click = "^4.0.0" itsdangerous = "^2.0.0" openpyxl = "^3.0.9" "backports.zoneinfo" = {markers = "python_version < \"3.9\"", version = "^0.2.1"} -pdoc = "^14.0.0" dateparser = "^1.1.4" pandarallel = "^1.6.4" -schematic-db = {version = 
"0.0.41", extras = ["synapse"]} pyopenssl = {version = "^23.0.0", optional = true} dataclasses-json = "^0.6.1" pydantic = "^1.10.4" -connexion = {extras = ["swagger-ui"], version = "^2.8.0", optional = true} -Flask = {version = "2.1.3", optional = true} -Flask-Cors = {version = "^3.0.10", optional = true} +connexion = {extras = ["swagger-ui"], version = "^2.8.0"} +Flask = "2.1.3" +Flask-Cors = "^3.0.10" uWSGI = {version = "^2.0.21", optional = true} Jinja2 = {version = ">2.11.3", optional = true} asyncio = "^3.4.3" -jaeger-client = {version = "^4.8.0", optional = true} -flask-opentracing = {version="^2.0.0", optional = true} PyJWT = "^2.9.0" -opentelemetry-exporter-otlp-proto-grpc = {version="^1.0.0", optional = true} +opentelemetry-api = {version = ">=1.21.0", optional = true} +opentelemetry-sdk = {version = ">=1.21.0", optional = true} +opentelemetry-exporter-otlp-proto-http = {version="^1.0.0", optional = true} +opentelemetry-instrumentation-flask = ">=0.48b0" +requests-oauth2client = ">=1.6.0" [tool.poetry.extras] -api = ["connexion", "Flask", "Flask-Cors", "Jinja2", "pyopenssl", "jaeger-client", "flask-opentracing", "opentelemetry-exporter-otlp-proto-grpc"] +api = ["Jinja2", "pyopenssl", "opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-otlp-proto-http"] aws = ["uWSGI"] - [tool.poetry.group.dev.dependencies] pytest = "^8.0.0" pytest-cov = "^4.0.0" @@ -99,11 +95,10 @@ pylint = "^2.16.1" pytest-xdist = "^3.5.0" pre-commit = "^3.6.2" -[tool.poetry.group.aws] -optional = true - -[tool.poetry.group.aws.dependencies] - +[tool.poetry.group.doc.dependencies] +pdoc = "^14.0.0" +sphinx = "7.3.7" +sphinx-click = "4.4.0" [tool.black] line-length = 88 @@ -135,41 +130,3 @@ testpaths = [ filterwarnings = [ "ignore::DeprecationWarning" ] -markers = [ - """\ - google_credentials_needed: marks tests requiring \ - Google credentials (skipped on GitHub CI) \ - """, - """\ - submission: tests that involve submitting manifests - """, - """\ - not_windows: tests that don't work on on windows machine - """, - """\ - schematic_api: marks tests covering \ - API functionality (skipped on regular GitHub CI test suite) - """, - """\ - rule_combos: marks tests covering \ - combinations of rules that aren't always necessary \ - and can add significantly to CI runtime (skipped on GitHub CI unless prompted to run in commit message) - """, - """\ - table_operations: marks tests covering \ - table operations that pass locally \ - but fail on CI due to interactions with Synapse (skipped on GitHub CI) - """, - """\ - rule_benchmark: marks tests covering \ - validation rule benchmarking - """, - """\ - synapse_credentials_needed: marks api tests that \ - require synapse credentials to run - """, - """\ - empty_token: marks api tests that \ - send empty credentials in the request - """ -] \ No newline at end of file diff --git a/pytest.ini b/pytest.ini index 982e6ef86..c1e5f6e72 100644 --- a/pytest.ini +++ b/pytest.ini @@ -3,4 +3,18 @@ python_files = test_*.py asyncio_mode = auto asyncio_default_fixture_loop_scope = session log_cli = False -log_cli_level = INFO \ No newline at end of file +log_cli_level = INFO +markers = + google_credentials_needed: marks tests requiring Google credentials (skipped on GitHub CI) + submission: tests that involve submitting manifests + not_windows: tests that dont work on on windows machine + schematic_api: marks tests covering API functionality (skipped on regular GitHub CI test suite) + rule_combos: marks tests covering combinations of rules that arent always necessary and 
can add significantly to CI runtime (skipped on GitHub CI unless prompted to run in commit message) + table_operations: marks tests covering table operations that pass locally but fail on CI due to interactions with Synapse (skipped on GitHub CI) + rule_benchmark: marks tests covering validation rule benchmarking + synapse_credentials_needed: marks api tests that require synapse credentials to run + empty_token: marks api tests that send empty credentials in the request + manual_verification_required: Tests that require manual verification to fully validate + local_or_remote_api: Tests that can be configured to run against a local or remote API + single_process_execution: Tests that should run without xdist due to tests being flakey + slow_test: Tests that are very slow when running on github actions diff --git a/schematic/__init__.py b/schematic/__init__.py index 1b4ec14fe..718feb6a3 100644 --- a/schematic/__init__.py +++ b/schematic/__init__.py @@ -1,2 +1,183 @@ +import logging +import os +from typing import Dict, List + +import requests +from opentelemetry import trace +from opentelemetry._logs import set_logger_provider +from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.instrumentation.flask import FlaskInstrumentor +from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler +from opentelemetry.sdk._logs.export import BatchLogRecordProcessor +from opentelemetry.sdk.resources import DEPLOYMENT_ENVIRONMENT, SERVICE_NAME, Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor, Span +from opentelemetry.sdk.trace.sampling import ALWAYS_OFF +from requests_oauth2client import OAuth2Client, OAuth2ClientCredentialsAuth +from synapseclient import Synapse +from werkzeug import Request + from schematic.configuration.configuration import CONFIG from schematic.loader import LOADER +from schematic_api.api.security_controller import info_from_bearer_auth + +Synapse.allow_client_caching(False) +logger = logging.getLogger(__name__) + + +def create_telemetry_session() -> requests.Session: + """ + Create a requests session with authorization enabled if environment variables are set. + If no environment variables are set, the session will be created without authorization. + + Returns: + requests.Session: A session object with authorization enabled if environment + variables are set. If no environment variables are set, the session will be + created without authorization. If no telemetry export format is set, None + will be returned. + """ + tracing_export = os.environ.get("TRACING_EXPORT_FORMAT", None) + logging_export = os.environ.get("LOGGING_EXPORT_FORMAT", None) + if not (tracing_export or logging_export): + return None + + session = requests.Session() + static_otlp_headers = os.environ.get("OTEL_EXPORTER_OTLP_HEADERS", None) + if static_otlp_headers: + logger.info( + "Using static OTLP headers set in environment variable `OTEL_EXPORTER_OTLP_HEADERS`." 
+ ) + return session + + client_id = os.environ.get("TELEMETRY_EXPORTER_CLIENT_ID", None) + client_secret = os.environ.get("TELEMETRY_EXPORTER_CLIENT_SECRET", None) + client_token_endpoint = os.environ.get( + "TELEMETRY_EXPORTER_CLIENT_TOKEN_ENDPOINT", None + ) + client_audience = os.environ.get("TELEMETRY_EXPORTER_CLIENT_AUDIENCE", None) + if ( + not client_id + or not client_secret + or not client_token_endpoint + or not client_audience + ): + logger.warning( + "No client_id, client_secret, client_audience, or token_endpoint provided for telemetry exporter. Telemetry data will be sent without authentication." + ) + return session + + oauth2client = OAuth2Client( + token_endpoint=client_token_endpoint, + client_id=client_id, + client_secret=client_secret, + ) + + auth = OAuth2ClientCredentialsAuth(client=oauth2client, audience=client_audience) + session.auth = auth + + return session + + +def set_up_tracing(session: requests.Session) -> None: + """Set up tracing for the API. + Args: + session: requests.Session object to use for exporting telemetry data. If + the exporter is set to OTLP, this session will be used to send the data. + If the exporter is set to file, this session will be ignored. + """ + tracing_export = os.environ.get("TRACING_EXPORT_FORMAT", None) + if tracing_export is not None and tracing_export: + Synapse.enable_open_telemetry(True) + tracing_service_name = os.environ.get("TRACING_SERVICE_NAME", "schematic-api") + deployment_environment = os.environ.get("DEPLOYMENT_ENVIRONMENT", "") + trace.set_tracer_provider( + TracerProvider( + resource=Resource( + attributes={ + SERVICE_NAME: tracing_service_name, + # TODO: Revisit this portion later on. As of 11/12/2024 when + # deploying this to ECS or running within a docker container, + # the package version errors out with the following error: + # importlib.metadata.PackageNotFoundError: No package metadata was found for schematicpy + # SERVICE_VERSION: package_version, + DEPLOYMENT_ENVIRONMENT: deployment_environment, + } + ) + ) + ) + FlaskInstrumentor().instrument( + request_hook=request_hook, response_hook=response_hook + ) + + if tracing_export == "otlp": + exporter = OTLPSpanExporter(session=session) + trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(exporter)) + else: + trace.set_tracer_provider(TracerProvider(sampler=ALWAYS_OFF)) + + +def set_up_logging(session: requests.Session) -> None: + """Set up logging to export to OTLP.""" + logging_export = os.environ.get("LOGGING_EXPORT_FORMAT", None) + logging_service_name = os.environ.get("LOGGING_SERVICE_NAME", "schematic-api") + deployment_environment = os.environ.get("DEPLOYMENT_ENVIRONMENT", "") + if logging_export == "otlp": + resource = Resource.create( + { + SERVICE_NAME: logging_service_name, + DEPLOYMENT_ENVIRONMENT: deployment_environment, + } + ) + + logger_provider = LoggerProvider(resource=resource) + set_logger_provider(logger_provider=logger_provider) + + exporter = OTLPLogExporter(session=session) + logger_provider.add_log_record_processor(BatchLogRecordProcessor(exporter)) + handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider) + logging.getLogger().addHandler(handler) + + +def request_hook(span: Span, environ: Dict) -> None: + """ + Request hook for the flask server to handle setting attributes in the span. If + anything here happens to fail we do not want to stop the request from being + processed so we catch all exceptions and log them. 
+ + Args: + span: The span object to set attributes in + environ: The environment variables from the request + """ + if not span or not span.is_recording(): + return + try: + if auth_header := environ.get("HTTP_AUTHORIZATION", None): + split_headers = auth_header.split(" ") + if len(split_headers) > 1: + token = auth_header.split(" ")[1] + user_info = info_from_bearer_auth(token) + if user_info: + span.set_attribute("user.id", user_info.get("sub")) + except Exception: + logger.exception("Failed to set user info in span") + + try: + if (request := environ.get("werkzeug.request", None)) and isinstance( + request, Request + ): + for arg in request.args: + span.set_attribute(key=f"schematic.{arg}", value=request.args[arg]) + except Exception: + logger.exception("Failed to set request info in span") + + +def response_hook(span: Span, status: str, response_headers: List) -> None: + """Nothing is implemented here yet, but it follows the same pattern as the + request hook.""" + pass + + +request_session = create_telemetry_session() +set_up_tracing(session=request_session) +set_up_logging(session=request_session) diff --git a/schematic/manifest/commands.py b/schematic/manifest/commands.py index 6b212239f..bc68cd03c 100644 --- a/schematic/manifest/commands.py +++ b/schematic/manifest/commands.py @@ -1,25 +1,21 @@ -import json -import os -import pandas as pd import logging -from pathlib import Path +import os import sys -from typing import get_args, List +from pathlib import Path +from typing import List, get_args + import click import click_log -from schematic.schemas.data_model_parser import DataModelParser -from schematic.schemas.data_model_graph import DataModelGraph, DataModelGraphExplorer -from schematic.manifest.generator import ManifestGenerator - -from schematic.utils.schema_utils import DisplayLabelType -from schematic.utils.cli_utils import log_value_from_config, query_dict, parse_syn_ids -from schematic.utils.google_api_utils import export_manifest_csv - +from schematic.configuration.configuration import CONFIG from schematic.help import manifest_commands - +from schematic.manifest.generator import ManifestGenerator +from schematic.schemas.data_model_graph import DataModelGraph, DataModelGraphExplorer +from schematic.schemas.data_model_parser import DataModelParser from schematic.store.synapse import SynapseStorage -from schematic.configuration.configuration import CONFIG +from schematic.utils.cli_utils import log_value_from_config, parse_syn_ids, query_dict +from schematic.utils.google_api_utils import export_manifest_csv +from schematic.utils.schema_utils import DisplayLabelType logger = logging.getLogger("schematic") click_log.basic_config(logger) @@ -343,14 +339,15 @@ def migrate_manifests( ) @click.pass_obj def download_manifest(ctx, dataset_id, new_manifest_name): - master_fileview = CONFIG["synapse"]["master_fileview"] - # use Synapse Storage store = SynapseStorage() # download existing file manifest_data = store.getDatasetManifest( - datasetId=dataset_id, downloadFile=True, newManifestName=new_manifest_name + datasetId=dataset_id, + downloadFile=True, + newManifestName=new_manifest_name, + use_temporary_folder=False, ) if not manifest_data: diff --git a/schematic/manifest/generator.py b/schematic/manifest/generator.py index 47acad4b4..69b86e136 100644 --- a/schematic/manifest/generator.py +++ b/schematic/manifest/generator.py @@ -3,8 +3,7 @@ import os from collections import OrderedDict from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import Any, 
BinaryIO, Dict, List, Literal, Optional, Tuple, Union +from typing import Dict, List, Literal, Optional, Tuple, Union import networkx as nx import pandas as pd diff --git a/schematic/models/GE_Helpers.py b/schematic/models/GE_Helpers.py index 9eda117a8..c2e7146ec 100644 --- a/schematic/models/GE_Helpers.py +++ b/schematic/models/GE_Helpers.py @@ -1,21 +1,16 @@ import logging import os -import re -from statistics import mode -from tabnanny import check +import uuid # allows specifying explicit variable types -from typing import Any, Dict, List, Optional, Text -from urllib import error -from urllib.parse import urlparse -from urllib.request import HTTPDefaultErrorHandler, OpenerDirector, Request, urlopen +from typing import Dict, List import numpy as np -from attr import attr from great_expectations.core import ExpectationSuite from great_expectations.core.expectation_configuration import ExpectationConfiguration from great_expectations.data_context import BaseDataContext from great_expectations.data_context.types.base import ( + AnonymizedUsageStatisticsConfig, DataContextConfig, DatasourceConfig, FilesystemStoreBackendDefaults, @@ -24,7 +19,7 @@ ExpectationSuiteIdentifier, ) from great_expectations.exceptions.exceptions import GreatExpectationsError -from ruamel import yaml +from opentelemetry import trace import great_expectations as ge from schematic.models.validate_attribute import GenerateError @@ -38,7 +33,7 @@ ) logger = logging.getLogger(__name__) - +tracer = trace.get_tracer("Schematic") # List of modifiers that users can add to a rule, that arent rules themselves. # as additional modifiers are added will need to update this list @@ -93,6 +88,7 @@ def __init__(self, dmge, unimplemented_expectations, manifest, manifestPath): self.manifest = manifest self.manifestPath = manifestPath + @tracer.start_as_current_span("GreatExpectationsHelpers::build_context") def build_context(self): """ Purpose: @@ -119,6 +115,9 @@ def build_context(self): }, } + # Setting this to False prevents extra data from leaving schematic + anonymous_usage_statistics = AnonymizedUsageStatisticsConfig(enabled=False) + # create data context configuration data_context_config = DataContextConfig( datasources={ @@ -136,6 +135,7 @@ def build_context(self): store_backend_defaults=FilesystemStoreBackendDefaults( root_directory=os.path.join(os.getcwd(), "great_expectations") ), + anonymous_usage_statistics=anonymous_usage_statistics, ) # build context and add data source @@ -143,6 +143,9 @@ def build_context(self): # self.context.test_yaml_config(yaml.dump(datasource_config)) self.context.add_datasource(**datasource_config) + @tracer.start_as_current_span( + "GreatExpectationsHelpers::add_expectation_suite_if_not_exists" + ) def add_expectation_suite_if_not_exists(self) -> ExpectationSuite: """ Purpose: @@ -151,30 +154,18 @@ def add_expectation_suite_if_not_exists(self) -> ExpectationSuite: Returns: saves expectation suite and identifier to self """ - self.expectation_suite_name = "Manifest_test_suite" - # Get a list of all expectation suites - suite_names = self.context.list_expectation_suite_names() - # Get a list of all checkpoints - all_checkpoints = self.context.list_checkpoints() - - # if the suite exists, delete it - if self.expectation_suite_name in suite_names: - self.context.delete_expectation_suite(self.expectation_suite_name) - - # also delete all the checkpoints associated with the suite - if all_checkpoints: - for checkpoint_name in all_checkpoints: - self.context.delete_checkpoint(checkpoint_name) - - 
self.suite = self.context.add_expectation_suite( + self.expectation_suite_name = f"Manifest_test_suite_{uuid.uuid4()}" + expectation_suite = self.context.add_expectation_suite( expectation_suite_name=self.expectation_suite_name, ) + self.suite = expectation_suite return self.suite + @tracer.start_as_current_span("GreatExpectationsHelpers::build_expectation_suite") def build_expectation_suite( self, - ): + ) -> None: """ Purpose: Construct an expectation suite to validate columns with rules that have expectations @@ -374,9 +365,11 @@ def build_expectation_suite( suite_identifier = ExpectationSuiteIdentifier( expectation_suite_name=self.expectation_suite_name ) - self.context.build_data_docs(resource_identifiers=[suite_identifier]) - ##Webpage DataDocs opened here: - # self.context.open_data_docs(resource_identifier=suite_identifier) + + if logger.isEnabledFor(logging.DEBUG): + self.context.build_data_docs(resource_identifiers=[suite_identifier]) + # Webpage DataDocs opened here: + # self.context.open_data_docs(resource_identifier=suite_identifier) def add_expectation( self, @@ -421,7 +414,7 @@ def build_checkpoint(self): adds checkpoint to self """ # create manifest checkpoint - self.checkpoint_name = "manifest_checkpoint" + self.checkpoint_name = f"manifest_checkpoint_{uuid.uuid4()}" checkpoint_config = { "name": self.checkpoint_name, "config_version": 1, @@ -486,6 +479,8 @@ def generate_errors( if ( "exception_info" in result_dict.keys() + # This changes in 0.18.x of GE, details on this: + # https://docs.greatexpectations.io/docs/0.18/reference/learn/terms/validation_result/ and result_dict["exception_info"]["exception_message"] ): raise GreatExpectationsError( @@ -501,6 +496,14 @@ def generate_errors( # because type validation is column aggregate expectation and not column map expectation when columns are not of object type, # indices and values cannot be returned else: + # This changes in 0.17.x of GE, refactored code: + # for i, item in enumerate(self.manifest[errColumn]): + # observed_type = result_dict.get("result", {}).get("observed_value", None) + # is_instance_type = observed_type is not None and isinstance( + # item, type_dict[observed_type] + # ) + # indices.append(i) if is_instance_type else indices + # values.append(item) if is_instance_type else values for i, item in enumerate(self.manifest[errColumn]): observed_type = result_dict["result"]["observed_value"] indices.append(i) if isinstance( diff --git a/schematic/models/metadata.py b/schematic/models/metadata.py index 582a00168..4402d8a59 100644 --- a/schematic/models/metadata.py +++ b/schematic/models/metadata.py @@ -192,6 +192,7 @@ def get_component_requirements( return req_components # TODO: abstract validation in its own module + @tracer.start_as_current_span("MetadataModel::validateModelManifest") def validateModelManifest( self, manifestPath: str, @@ -273,10 +274,6 @@ def validateModelManifest( return errors, warnings - # check if suite has been created. 
If so, delete it - if os.path.exists("great_expectations/expectations/Manifest_test_suite.json"): - os.remove("great_expectations/expectations/Manifest_test_suite.json") - errors, warnings, manifest = validate_all( self, errors=errors, diff --git a/schematic/models/validate_attribute.py b/schematic/models/validate_attribute.py index e196bbe14..9b13bebaf 100644 --- a/schematic/models/validate_attribute.py +++ b/schematic/models/validate_attribute.py @@ -11,6 +11,7 @@ import pandas as pd import requests from jsonschema import ValidationError +from opentelemetry import trace from synapseclient import File from synapseclient.core.exceptions import SynapseNoCredentialsError @@ -27,9 +28,11 @@ ) logger = logging.getLogger(__name__) +tracer = trace.get_tracer("Schematic") MessageLevelType = Literal["warning", "error"] ScopeTypes = Literal["set", "value"] +tracer = trace.get_tracer("schematic") class GenerateError: @@ -775,6 +778,7 @@ class ValidateAttribute(object): def __init__(self, dmge: DataModelGraphExplorer) -> None: self.dmge = dmge + @tracer.start_as_current_span("ValidateAttribute::_login") def _login( self, access_token: Optional[str] = None, @@ -873,7 +877,7 @@ def get_target_manifests( project_scope: Optional[list[str]], access_token: Optional[str] = None, ) -> tuple[list[str], list[str]]: - """Gets a list of synapse ids of mainfests to check against + """Gets a list of synapse ids of manifests to check against Args: target_component (str): Manifet ids are gotten fo this type @@ -914,14 +918,14 @@ def get_target_manifests( def list_validation( self, val_rule: str, - manifest_col: pd.core.series.Series, - ) -> tuple[list[list[str]], list[list[str]], pd.core.series.Series]: + manifest_col: pd.Series, + ) -> tuple[list[list[str]], list[list[str]], pd.Series]: """ Purpose: Determine if values for a particular attribute are comma separated. Input: - val_rule: str, Validation rule - - manifest_col: pd.core.series.Series, column for a given attribute + - manifest_col: pd.Series, column for a given attribute Returns: - manifest_col: Input values in manifest arere-formatted to a list logger.error or logger.warning. @@ -932,8 +936,8 @@ def list_validation( # For each 'list' (input as a string with a , delimiter) entered, # convert to a real list of strings, with leading and trailing # white spaces removed. - errors = [] - warnings = [] + errors: list[list[str]] = [] + warnings: list[list[str]] = [] replace_null = True csv_re = comma_separated_list_regex() @@ -954,7 +958,10 @@ def list_validation( entry=list_string, node_display_name=manifest_col.name, ) - + # Because of the above line: manifest_col = manifest_col.astype(str) + # this column has been turned into a string, it's unclear if any values + # from this column can be anything other than a string, and therefore this + # if statement may not be needed if not isinstance(list_string, str) and entry_has_value: list_error = "not_a_string" elif not re.fullmatch(csv_re, list_string) and entry_has_value: @@ -983,7 +990,7 @@ def list_validation( def regex_validation( self, val_rule: str, - manifest_col: pd.core.series.Series, + manifest_col: pd.Series, ) -> tuple[list[list[str]], list[list[str]]]: """ Purpose: @@ -991,7 +998,7 @@ def regex_validation( provided in val_rule. 
Input: - val_rule: str, Validation rule - - manifest_col: pd.core.series.Series, column for a given + - manifest_col: pd.Series, column for a given attribute in the manifest - dmge: DataModelGraphExplorer Object Using this module requres validation rules written in the following manner: @@ -1005,8 +1012,8 @@ def regex_validation( - This function will return errors when the user input value does not match schema specifications. logger.error or logger.warning. - Errors: list[str] Error details for further storage. - warnings: list[str] Warning details for further storage. + Errors: list[list[str]] Error details for further storage. + warnings: list[list[str]] Warning details for further storage. TODO: move validation to convert step. """ @@ -1022,13 +1029,15 @@ def regex_validation( f" They should be provided as follows ['regex', 'module name', 'regular expression']" ) - errors = [] - warnings = [] + errors: list[list[str]] = [] + warnings: list[list[str]] = [] validation_rules = self.dmge.get_node_validation_rules( node_display_name=manifest_col.name ) - + # It seems like this statement can never be true + # self.dmge.get_node_validation_rules never returns a list with "::" even when + # the attribute has the "list::regex" rule if validation_rules and "::" in validation_rules[0]: validation_rules = validation_rules[0].split("::") # Handle case where validating re's within a list. @@ -1096,26 +1105,43 @@ def regex_validation( def type_validation( self, val_rule: str, - manifest_col: pd.core.series.Series, + manifest_col: pd.Series, ) -> tuple[list[list[str]], list[list[str]]]: """ - Purpose: - Check if values for a given manifest attribue are the same type + Check if values for a given manifest attribute are the same type specified in val_rule. - Input: - - val_rule: str, Validation rule, specifying input type, either + + Args: + val_rule (str): Validation rule, specifying input type, either 'float', 'int', 'num', 'str' - - manifest_col: pd.core.series.Series, column for a given + manifest_col (pd.Series): column for a given attribute in the manifest + + Raises: + ValueError: If after splitting the validation rule by spaces, + there are no components left + ValueError: If after splitting the validation rule by spaces, + there are more than two components left + ValueError: If after splitting the validation rule by spaces, + the first component is not one of 'float', 'int', 'num', 'str' + Returns: - -This function will return errors when the user input value - does not match schema specifications. - logger.error or logger.warning. - Errors: list[str] Error details for further storage. - warnings: list[str] Warning details for further storage. - TODO: - Convert all inputs to .lower() just to prevent any entry errors. 
+ tuple[list[list[str]], list[list[str]]]: _description_ """ + val_rule_components = val_rule.split(" ") + if len(val_rule_components) == 0: + raise ValueError("val_rule must contain at least one component.") + if len(val_rule_components) > 2: + raise ValueError("val_rule must contain no more than two components.") + val_rule_type = val_rule_components[0] + if val_rule_type not in ["float", "int", "num", "str"]: + raise ValueError( + ( + f"val_rule first component: {val_rule_type} must be one of " + "['float', 'int', 'num', 'str']" + ) + ) + specified_type = { "num": (int, np.int64, float), "int": (int, np.int64), @@ -1123,11 +1149,11 @@ def type_validation( "str": (str), } - errors = [] - warnings = [] + errors: list[list[str]] = [] + warnings: list[list[str]] = [] # num indicates either a float or int. - if val_rule == "num": + if val_rule_type == "num": for i, value in enumerate(manifest_col): entry_has_value = self.get_entry_has_value( entry=value, @@ -1135,7 +1161,7 @@ def type_validation( ) if ( bool(value) - and not isinstance(value, specified_type[val_rule]) + and not isinstance(value, specified_type[val_rule_type]) and entry_has_value ): vr_errors, vr_warnings = GenerateError.generate_type_error( @@ -1149,7 +1175,7 @@ def type_validation( errors.append(vr_errors) if vr_warnings: warnings.append(vr_warnings) - elif val_rule in ["int", "float", "str"]: + elif val_rule_type in ["int", "float", "str"]: for i, value in enumerate(manifest_col): entry_has_value = self.get_entry_has_value( entry=value, @@ -1157,7 +1183,7 @@ def type_validation( ) if ( bool(value) - and not isinstance(value, specified_type[val_rule]) + and not isinstance(value, specified_type[val_rule_type]) and entry_has_value ): vr_errors, vr_warnings = GenerateError.generate_type_error( @@ -1286,14 +1312,14 @@ def url_validation( return errors, warnings def _parse_validation_log( - self, validation_log: dict[str, pd.core.series.Series] - ) -> tuple[[list[str], list[str], list[str]]]: + self, validation_log: dict[str, pd.Series] + ) -> tuple[list[str], list[str], list[str]]: """Parse validation log, so values can be used to raise warnings/errors Args: - validation_log, dict[str, pd.core.series.Series]: + validation_log, dict[str, pd.Series]: Returns: invalid_rows, list: invalid rows recorded in the validation log - invalid_enties, list: invalid values recorded in the validation log + invalid_entities, list: invalid values recorded in the validation log manifest_ids, list: """ # Initialize parameters @@ -1313,12 +1339,15 @@ def _parse_validation_log( return invalid_rows, invalid_entries, manifest_ids def _merge_format_invalid_rows_values( - self, series_1: pd.core.series.Series, series_2: pd.core.series.Series - ) -> tuple[[list[str], list[str]]]: - """Merge two series to identify gather all invalid values, and parse out invalid rows and entries + self, series_1: pd.Series, series_2: pd.Series + ) -> tuple[list[str], list[str]]: + """ + Merge two series to identify gather all invalid values, + and parse out invalid rows and entries + Args: - series_1, pd.core.series.Series: first set of invalid values to extract - series_2, pd.core.series.Series: second set of invalid values to extract + series_1, pd.Series: first set of invalid values to extract + series_2, pd.Series: second set of invalid values to extract Returns: invalid_rows, list: invalid rows taken from both series invalid_entry, list: invalid values taken from both series @@ -1342,12 +1371,14 @@ def _merge_format_invalid_rows_values( return invalid_rows, 
invalid_entry def _format_invalid_row_values( - self, invalid_values: dict[str, pd.core.series.Series] - ) -> tuple[[list[str], list[str]]]: - """Parse invalid_values dictionary, to extract invalid_rows and invalid_entry to be used later - to raise warnings or errors. + self, invalid_values: pd.Series + ) -> tuple[list[str], list[str]]: + """ + Parse invalid_values, to extract invalid_rows and invalid_entry + to be used later to raise warnings or errors. + Args: - invalid_values, dict[str, pd.core.series.Series]: + invalid_values, pd.Series: Returns: invalid_rows, list: invalid rows recorded in invalid_values invalid_entry, list: invalid values recorded in invalid_values @@ -1383,9 +1414,9 @@ def _gather_set_warnings_errors( Returns: errors, list[str]: list of errors to raise, as appropriate, if values in current manifest do - not pass relevant cross mannifest validation across the target manifest(s) + not pass relevant cross manifest validation across the target manifest(s) warnings, list[str]: list of warnings to raise, as appropriate, if values in current manifest do - not pass relevant cross mannifest validation across the target manifest(s) + not pass relevant cross manifest validation across the target manifest(s) """ errors: list[str] = [] warnings: list[str] = [] @@ -1440,21 +1471,28 @@ def _remove_non_entry_from_invalid_entry_list( row_num: Optional[list[str]], attribute_name: str, ) -> tuple[list[str], list[str]]: - """Helper to remove NAs from a list of invalid entries (if applicable, and allowed), remove the row - too from row_num. This will make sure errors are not rasied for NA entries unless the value is required. + """ + Helper to remove NAs from a list of invalid entries (if applicable, and allowed), + remove the row too from row_num. This will make sure errors are not raised for + NA entries unless the value is required. + Args: invalid_entry, list[str]: default=None, list of entries in the source manifest where - invalid values were located. - row_num, list[str[: default=None, list of rows in the source manifest where invalid values were located + invalid values were located. + row_num, list[str[: default=None, list of rows in the source manifest where invalid + values were located attribute_name, str: source attribute name + Returns: - invalid_entry and row_num returned with any NA and corresponding row index value removed, if applicable. + invalid_entry and row_num returned with any NA and corresponding row index value + removed, if applicable. """ idx_to_remove = [] # Check if the current attribute column is required, via the data model if invalid_entry and row_num: # Check each invalid entry and determine if it has a value and/or is required. - # If there is no entry and its not required, remove the NA value so an error is not raised. + # If there is no entry and its not required, remove the NA value so an + # error is not raised. for idx, entry in enumerate(invalid_entry): entry_has_value = self.get_entry_has_value(entry, attribute_name) # If there is no value, and is not required, recored the index @@ -1466,8 +1504,8 @@ def _remove_non_entry_from_invalid_entry_list( for idx in sorted(idx_to_remove, reverse=True): del invalid_entry[idx] del row_num[idx] - # Perform check to make sure length of invalid_entry and row_num is the same. If not that would suggest - # there was an issue recording or removing values. + # Perform check to make sure length of invalid_entry and row_num is the same. + # If not that would suggest there was an issue recording or removing values. 
if len(invalid_entry) != len(row_num): logger.error( f"There was an error handling and validating a non-entry." @@ -1530,17 +1568,22 @@ def _gather_value_warnings_errors( source_attribute: str, value_validation_store: tuple[pd.Series, pd.Series, pd.Series], ) -> tuple[list[str], list[str]]: - """For value rule scope, find invalid rows and entries, and generate appropriate errors and warnings + """ + For value rule scope, find invalid rows and entries, and generate + appropriate errors and warnings + Args: val_rule, str: Validation rule source_attribute, str: source manifest column name value_validation_store, tuple(pd.Series, pd.Series, pd.Series]): contains missing_values, duplicated_values, and repeat values Returns: - errors, list[str]: list of errors to raise, as appropriate, if values in current manifest do - not pass relevant cross mannifest validation across the target manifest(s) - warnings, list[str]: list of warnings to raise, as appropriate, if values in current manifest do - not pass relevant cross mannifest validation across the target manifest(s) + errors, list[str]: list of errors to raise, as appropriate, if values + in current manifest do not pass relevant cross manifest validation + across the target manifest(s) + warnings, list[str]: list of warnings to raise, as appropriate, + if values in current manifest do not pass relevant cross manifest + validation across the target manifest(s) """ # Initialize with empty lists errors, warnings = [], [] @@ -1580,17 +1623,22 @@ def _gather_value_warnings_errors( def _check_if_target_manifest_is_empty( self, - target_manifest: pd.core.series.Series, + target_manifest: pd.DataFrame, target_manifest_empty: list[bool], column_names: dict[str, str], ) -> list[bool]: - """If a target manifest is found with the attribute column of interest check to see if the manifest is empty. + """ + If a target manifest is found with the attribute column of interest check to see if + the manifest is empty. + Args: - target_manifest, pd.core.series.Series: Current target manifest - target_manifest_empty, list[bool]: a list of booleans recording if the target manifest are emtpy or not. + target_manifest, pd.Dataframe: Current target manifest + target_manifest_empty, list[bool]: a list of booleans recording if the target manifest + are empty or not. column_names, dict[str, str]: {stripped_col_name:original_column_name} Returns: - target_manifest_empty, list[bool]: a list of booleans recording if the target manifest are emtpy or not. + target_manifest_empty, list[bool]: a list of booleans recording if the target manifest + are empty or not. """ # Make a copy of the target manifest with only user uploaded columns target_manifest_dupe = target_manifest.drop( @@ -1962,6 +2010,7 @@ def _run_validation_across_target_manifests( return (start_time, validation_store) + @tracer.start_as_current_span("ValidateAttribute::cross_validation") def cross_validation( self, val_rule: str, @@ -2037,10 +2086,11 @@ def cross_validation( return errors, warnings + @tracer.start_as_current_span("ValidateAttribute::filename_validation") def filename_validation( self, val_rule: str, - manifest: pd.core.frame.DataFrame, + manifest: pd.DataFrame, access_token: str, dataset_scope: str, project_scope: Optional[list] = None, @@ -2050,7 +2100,7 @@ def filename_validation( Validate the filenames in the manifest against the data paths in the fileview. 
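The `_check_if_target_manifest_is_empty` change above drops the non-user columns before deciding whether a target manifest is empty. A small pandas sketch of that idea (the column names here are assumptions for illustration only):

```python
import pandas as pd


def manifest_is_empty(target_manifest: pd.DataFrame, bookkeeping_cols: list[str]) -> bool:
    """Return True when nothing user-entered remains after dropping bookkeeping columns."""
    user_data = target_manifest.drop(columns=bookkeeping_cols, errors="ignore")
    return user_data.dropna(how="all").empty


manifest = pd.DataFrame({"entityId": ["syn1"], "Id": ["uuid-1"], "Patient ID": [None]})
print(manifest_is_empty(manifest, ["entityId", "Id"]))  # True
```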
Args: val_rule: str, Validation rule for the component - manifest: pd.core.frame.DataFrame, manifest + manifest: pd.DataFrame, manifest access_token: str, Asset Store access token dataset_scope: str, Dataset with files to validate against project_scope: Optional[list] = None: Projects to limit the scope of cross manifest validation to. diff --git a/schematic/models/validate_manifest.py b/schematic/models/validate_manifest.py index 3b85b1414..04b75c82a 100644 --- a/schematic/models/validate_manifest.py +++ b/schematic/models/validate_manifest.py @@ -1,27 +1,20 @@ import json import logging -import os -import re -import sys +import uuid from numbers import Number -from statistics import mode -from tabnanny import check from time import perf_counter # allows specifying explicit variable types -from typing import Any, Dict, List, Optional, Text -from urllib import error -from urllib.parse import urlparse -from urllib.request import HTTPDefaultErrorHandler, OpenerDirector, Request, urlopen +from typing import List, Optional, Tuple import numpy as np import pandas as pd -from jsonschema import Draft7Validator, ValidationError, exceptions +from jsonschema import Draft7Validator, exceptions +from opentelemetry import trace from schematic.models.GE_Helpers import GreatExpectationsHelpers from schematic.models.validate_attribute import GenerateError, ValidateAttribute from schematic.schemas.data_model_graph import DataModelGraphExplorer -from schematic.store.synapse import SynapseStorage from schematic.utils.schema_utils import extract_component_validation_rules from schematic.utils.validate_rules_utils import validation_rule_info from schematic.utils.validate_utils import ( @@ -30,6 +23,7 @@ ) logger = logging.getLogger(__name__) +tracer = trace.get_tracer("Schematic") class ValidateManifest(object): @@ -69,13 +63,13 @@ def get_multiple_types_error( def check_max_rule_num( self, validation_rules: list[str], - col: pd.core.series.Series, + col: pd.Series, errors: list[list[str]], ) -> list[list[str]]: """Check that user isnt applying more rule combinations than allowed. Do not consider certain rules as a part of this rule limit. Args: validation_rules, list: Validation rules for current manifest column/attribute being evaluated - col, pd.core.series.Series: the current manifest column being evaluated + col, pd.Series: the current manifest column being evaluated errors, list[list[str]]: list of errors being compiled. Returns: errors, list[list[str]]: list of errors being compiled, with additional error list being appended if appropriate @@ -99,27 +93,28 @@ def check_max_rule_num( ) return errors + @tracer.start_as_current_span("ValidateManifest::validate_manifest_rules") def validate_manifest_rules( self, - manifest: pd.core.frame.DataFrame, + manifest: pd.DataFrame, dmge: DataModelGraphExplorer, restrict_rules: bool, project_scope: list[str], dataset_scope: Optional[str] = None, access_token: Optional[str] = None, - ) -> (pd.core.frame.DataFrame, list[list[str]]): + ) -> Tuple[pd.DataFrame, list[list[str]]]: """ Purpose: Take validation rules set for a particular attribute and validate manifest entries based on these rules. Input: - manifest: pd.core.frame.DataFrame + manifest: pd.DataFrame imported from models/metadata.py contains metadata input from user for each attribute. 
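The new module-level `tracer = trace.get_tracer("Schematic")` and the `@tracer.start_as_current_span(...)` decorators added throughout this diff follow the standard OpenTelemetry pattern. A hedged, self-contained sketch (the class, span, and attribute names are placeholders, not schematic code):

```python
from opentelemetry import trace

tracer = trace.get_tracer("Schematic")


class ExampleValidator:
    @tracer.start_as_current_span("ExampleValidator::validate")
    def validate(self, row_count: int) -> bool:
        span = trace.get_current_span()
        if span.is_recording():  # no-op unless an SDK/exporter is configured
            span.set_attribute("manifest.rows", row_count)
        return row_count > 0


print(ExampleValidator().validate(3))  # True
```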
dmge: DataModelGraphExplorer initialized within models/metadata.py Returns: - manifest: pd.core.frame.DataFrame + manifest: pd.DataFrame If a 'list' validatior is run, the manifest needs to be updated to change the attribute column values to a list. In this case the manifest will be updated then exported. @@ -139,12 +134,6 @@ def validate_manifest_rules( validation_types = validation_rule_info() - type_dict = { - "float64": float, - "int64": int, - "str": str, - } - unimplemented_expectations = [ "url", "list", @@ -174,51 +163,63 @@ def validate_manifest_rules( warnings = [] if not restrict_rules: - t_GE = perf_counter() + if logger.isEnabledFor(logging.DEBUG): + t_GE = perf_counter() # operations necessary to set up and run ge suite validation - ge_helpers = GreatExpectationsHelpers( - dmge=dmge, - unimplemented_expectations=unimplemented_expectations, - manifest=manifest, - manifestPath=self.manifestPath, - ) - - ge_helpers.build_context() - ge_helpers.build_expectation_suite() - ge_helpers.build_checkpoint() - - try: - # run GE validation - results = ge_helpers.context.run_checkpoint( - checkpoint_name=ge_helpers.checkpoint_name, - batch_request={ - "runtime_parameters": {"batch_data": manifest}, - "batch_identifiers": {"default_identifier_name": "manifestID"}, - }, - result_format={"result_format": "COMPLETE"}, - ) - finally: - ge_helpers.context.delete_checkpoint(ge_helpers.checkpoint_name) - ge_helpers.context.delete_expectation_suite( - ge_helpers.expectation_suite_name + with tracer.start_as_current_span( + "ValidateManifest::validate_manifest_rules::GreatExpectationsValidation" + ): + ge_helpers = GreatExpectationsHelpers( + dmge=dmge, + unimplemented_expectations=unimplemented_expectations, + manifest=manifest, + manifestPath=self.manifestPath, ) - validation_results = results.list_validation_results() + ge_helpers.build_context() + ge_helpers.build_expectation_suite() + ge_helpers.build_checkpoint() + + try: + # run GE validation + with tracer.start_as_current_span( + "ValidateManifest::validate_manifest_rules::GreatExpectationsValidation::run_checkpoint" + ): + results = ge_helpers.context.run_checkpoint( + checkpoint_name=ge_helpers.checkpoint_name, + batch_request={ + "runtime_parameters": {"batch_data": manifest}, + "batch_identifiers": { + "default_identifier_name": f"manifestID_{uuid.uuid4()}" + }, + }, + result_format={"result_format": "COMPLETE"}, + ) + finally: + ge_helpers.context.delete_checkpoint( + name=ge_helpers.checkpoint_name + ) + ge_helpers.context.delete_expectation_suite( + expectation_suite_name=ge_helpers.expectation_suite_name + ) - # parse validation results dict and generate errors - errors, warnings = ge_helpers.generate_errors( - errors=errors, - warnings=warnings, - validation_results=validation_results, - validation_types=validation_types, - dmge=dmge, - ) - logger.debug(f"GE elapsed time {perf_counter()-t_GE}") + validation_results = results.list_validation_results() + + # parse validation results dict and generate errors + errors, warnings = ge_helpers.generate_errors( + errors=errors, + warnings=warnings, + validation_results=validation_results, + validation_types=validation_types, + dmge=dmge, + ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug(f"GE elapsed time {perf_counter()-t_GE}") else: logger.info("Great Expetations suite will not be utilized.") - t_err = perf_counter() - regex_re = re.compile("regex.*") + if logger.isEnabledFor(logging.DEBUG): + t_err = perf_counter() # Instantiate Validate Attribute validate_attribute = 
ValidateAttribute(dmge=dmge) @@ -255,7 +256,8 @@ def validate_manifest_rules( ) continue - t_indiv_rule = perf_counter() + if logger.isEnabledFor(logging.DEBUG): + t_indiv_rule = perf_counter() # Validate for each individual validation rule. validation_method = getattr( validate_attribute, validation_types[validation_type]["type"] @@ -289,10 +291,14 @@ def validate_manifest_rules( errors.extend(vr_errors) if vr_warnings: warnings.extend(vr_warnings) - logger.debug( - f"Rule {rule} elapsed time: {perf_counter()-t_indiv_rule}" - ) - logger.debug(f"In House validation elapsed time {perf_counter()-t_err}") + + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + f"Rule {rule} elapsed time: {perf_counter()-t_indiv_rule}" + ) + + if logger.isEnabledFor(logging.DEBUG): + logger.debug(f"In House validation elapsed time {perf_counter()-t_err}") return manifest, errors, warnings def validate_manifest_values( @@ -300,12 +306,11 @@ def validate_manifest_values( manifest, jsonSchema, dmge, - ) -> (List[List[str]], List[List[str]]): + ) -> Tuple[List[List[str]], List[List[str]]]: t_json_schema = perf_counter() errors = [] warnings = [] - col_attr = {} # save the mapping between column index and attribute name manifest = convert_nan_entries_to_empty_strings(manifest=manifest) @@ -321,12 +326,21 @@ def validate_manifest_values( annotations = json.loads(manifest.to_json(orient="records")) for i, annotation in enumerate(annotations): v = Draft7Validator(jsonSchema) - for error in sorted(v.iter_errors(annotation), key=exceptions.relevance): + for sorted_error in sorted( + v.iter_errors(annotation), key=exceptions.relevance + ): errorRow = str(i + 2) - errorCol = error.path[-1] if len(error.path) > 0 else "Wrong schema" - errorColName = error.path[0] if len(error.path) > 0 else "Wrong schema" - errorMsg = error.message[0:500] - errorVal = error.instance if len(error.path) > 0 else "Wrong schema" + errorColName = ( + sorted_error.path[0] + if len(sorted_error.path) > 0 + else "Wrong schema" + ) + errorMsg = sorted_error.message[0:500] + errorVal = ( + sorted_error.instance + if len(sorted_error.path) > 0 + else "Wrong schema" + ) val_errors, val_warnings = GenerateError.generate_schema_error( row_num=errorRow, diff --git a/schematic/schemas/data_model_parser.py b/schematic/schemas/data_model_parser.py index 0da26e933..6434d5b70 100644 --- a/schematic/schemas/data_model_parser.py +++ b/schematic/schemas/data_model_parser.py @@ -1,20 +1,17 @@ "Data Model Parser" +import logging import pathlib -from typing import Any, Union, Optional +from typing import Any, Optional, Union -import logging import pandas as pd from opentelemetry import trace +from schematic.schemas.data_model_relationships import DataModelRelationships from schematic.utils.df_utils import load_df from schematic.utils.io_utils import load_json from schematic.utils.schema_utils import attr_dict_template -from schematic.schemas.data_model_relationships import DataModelRelationships - -from schematic import LOADER - logger = logging.getLogger("Schemas") tracer = trace.get_tracer("Schematic") @@ -49,6 +46,9 @@ def _get_base_schema_path(self, base_schema: Optional[str] = None) -> str: Returns: base_schema_path: Path to base schema based on provided argument. 
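The rewritten `validate_manifest_values` loop above sorts JSON Schema errors by relevance before reporting the offending row, column, and value. A minimal jsonschema sketch of that walk, using a toy schema and record rather than schematic's generated schema:

```python
from jsonschema import Draft7Validator, exceptions

schema = {
    "type": "object",
    "properties": {"Patient ID": {"type": "string"}},
    "required": ["Patient ID"],
}
record = {"Patient ID": 123}  # wrong type on purpose

validator = Draft7Validator(schema)
for sorted_error in sorted(validator.iter_errors(record), key=exceptions.relevance):
    error_col = sorted_error.path[0] if len(sorted_error.path) > 0 else "Wrong schema"
    error_val = sorted_error.instance if len(sorted_error.path) > 0 else "Wrong schema"
    print(error_col, error_val, sorted_error.message[:80])
    # Patient ID 123 123 is not of type 'string'
```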
""" + # Lazy import to avoid circular imports + from schematic import LOADER # pylint: disable=import-outside-toplevel + biothings_schema_path = LOADER.filename("data_models/biothings.model.jsonld") self.base_schema_path = ( biothings_schema_path if base_schema is None else base_schema diff --git a/schematic/schemas/data_model_validator.py b/schematic/schemas/data_model_validator.py index ca3b7e31b..2abc3b67e 100644 --- a/schematic/schemas/data_model_validator.py +++ b/schematic/schemas/data_model_validator.py @@ -2,6 +2,9 @@ import time import logging +from typing import Tuple, Any, Iterable +import itertools +from dataclasses import dataclass import multiprocessing import networkx as nx # type: ignore @@ -10,8 +13,37 @@ logger = logging.getLogger(__name__) +# Characters display names of nodes that are not allowed +BLACKLISTED_CHARACTERS = ["(", ")", ".", "-"] +# Names of nodes that are used internally +RESERVED_NAMES = {"entityId"} -class DataModelValidator: + +""" +A list of node tuples. +The first item is the name of the node. +The second item is a dict of its fields. +This object is gotten from doing nx.MultiDiGraph.nodes(data=True) +""" + + +@dataclass +class Node: + """A node in graph from the data model.""" + + name: Any + """Name of the node.""" + + fields: dict + """Fields of the node""" + + def __post_init__(self) -> None: + if "displayName" not in self.fields: + raise ValueError(f"Node: {str(self.name)} missing displayName field") + self.display_name = str(self.fields["displayName"]) + + +class DataModelValidator: # pylint: disable=too-few-public-methods """ Check for consistency within data model. """ @@ -23,17 +55,14 @@ def __init__( """ Args: graph (nx.MultiDiGraph): Graph representation of the data model. - TODO: put blacklisted chars and reserved_names in some global space where - they can be accessed centrally """ self.graph = graph + self.node_info = [ + Node(node[0], node[1]) for node in self.graph.nodes(data=True) + ] self.dmr = DataModelRelationships() - # Define blacklisted characters, taken from store.synapse - self.blacklisted_chars = ["(", ")", ".", "-"] - # Define reserved_names, taken from Documentation - self.reserved_names = {"entityId"} - def run_checks(self) -> tuple[list, list]: + def run_checks(self) -> tuple[list[list[str]], list[list[str]]]: """Run all validation checks on the data model graph. Returns: @@ -43,49 +72,28 @@ def run_checks(self) -> tuple[list, list]: run additional tests, or move some to issuing only warnings, vice versa. """ error_checks = [ - self.check_graph_has_required_node_fields(), - self.check_is_dag(), - self.check_reserved_names(), + self._check_graph_has_required_node_fields(), + self._check_is_dag(), + self._check_reserved_names(), ] warning_checks = [ - self.check_blacklisted_characters(), + self._check_blacklisted_characters(), ] errors = [error for error in error_checks if error] warnings = [warning for warning in warning_checks if warning] return errors, warnings - def check_graph_has_required_node_fields(self) -> list[str]: + def _check_graph_has_required_node_fields(self) -> list[str]: """Checks that the graph has the required node fields for all nodes. + Returns: - error, list: List of error messages for each missing field. + list[str]: List of error messages for each missing field. 
""" - # Get all the fields that should be recorded per node - rel_dict = self.dmr.relationships_dictionary - node_fields = [] - for value in rel_dict.values(): - if "node_label" in value.keys(): - node_fields.append(value["node_label"]) - - error = [] - missing_fields = [] - # Check that required fields are present for each node. - for node, node_dict in self.graph.nodes(data=True): - missing_fields.extend( - [(node, f) for f in node_fields if f not in node_dict.keys()] - ) - - if missing_fields: - for missing_field in missing_fields: - error.append( - ( - f"For entry: {missing_field[0]}, the required field {missing_field[1]} " - "is missing in the data model graph, please double check your model and " - "generate the graph again." - ) - ) - return error + required_fields = get_node_labels_from(self.dmr.relationships_dictionary) + missing_fields = get_missing_fields_from(self.node_info, required_fields) + return create_missing_fields_error_messages(missing_fields) - def run_cycles(self) -> None: + def _run_cycles(self) -> None: """run_cycles""" cycles = nx.simple_cycles(self.graph) if cycles: # pylint:disable=using-constant-test @@ -98,7 +106,7 @@ def run_cycles(self) -> None: ) ) - def check_is_dag(self) -> list[str]: + def _check_is_dag(self) -> list[str]: """Check that generated graph is a directed acyclic graph Returns: @@ -110,7 +118,7 @@ def check_is_dag(self) -> list[str]: error = [] if not nx.is_directed_acyclic_graph(self.graph): cycles = multiprocessing.Process( - target=self.run_cycles, + target=self._run_cycles, name="Get Cycles", ) cycles.start() @@ -134,7 +142,7 @@ def check_is_dag(self) -> list[str]: return error - def check_blacklisted_characters(self) -> list[str]: + def _check_blacklisted_characters(self) -> list[str]: """ We strip these characters in store, so not sure if it matter if we have them now, maybe add warning @@ -143,72 +151,188 @@ def check_blacklisted_characters(self) -> list[str]: list[str]: list of warnings for each node in the graph, that has a Display name that contains blacklisted characters. """ - warning = [] - for _, node_dict in self.graph.nodes(data=True): - if any( - bl_char in node_dict["displayName"] - for bl_char in self.blacklisted_chars - ): - node_display_name = node_dict["displayName"] - blacklisted_characters_found = [ - bl_char - for bl_char in self.blacklisted_chars - if bl_char in node_dict["displayName"] - ] - blacklisted_characters_str = ",".join(blacklisted_characters_found) - warning.append( - ( - f"Node: {node_display_name} contains a blacklisted character(s): " - f"{blacklisted_characters_str}, they will be striped if used in " - "Synapse annotations." - ) - ) - return warning + return check_characters_in_node_display_name( + self.node_info, BLACKLISTED_CHARACTERS + ) - def check_reserved_names(self) -> list[str]: + def _check_reserved_names(self) -> list[str]: """Identify if any names nodes in the data model graph are the same as reserved name. Returns: - error, list: List of erros for every node in the graph whose name overlaps + error, list: List of errors for every node in the graph whose name overlaps with the reserved names. 
""" - error = [] - reserved_names_found = [ - (name, node) - for node in self.graph.nodes - for name in self.reserved_names - if name.lower() == node.lower() + reserved_names_found = match_node_names_with_reserved_names( + self.graph.nodes, RESERVED_NAMES + ) + return create_reserve_name_error_messages(reserved_names_found) + + +def get_node_labels_from(input_dict: dict) -> list: + """ + Searches dict, for nested dict. + For each nested dict, if it contains the key "node label" that value is returned. + + Args: + input_dict (dict): A dictionary with possible nested dictionaries + + Returns: + list: All values for node labels + """ + node_fields = [] + for value in input_dict.values(): + if isinstance(value, dict) and "node_label" in value.keys(): + node_fields.append(value["node_label"]) + return node_fields + + +def get_missing_fields_from( + nodes: list[Node], required_fields: Iterable +) -> list[Tuple[str, str]]: + """ + Iterates through each node and checks if it contains each required_field. + Any missing fields are returned. + + Args: + nodes (list[Node]): A list of nodes. + required_fields (Iterable): A Iterable of fields each node should have + + Returns: + list[Tuple[str, str]]: A list of missing fields. + The first item in each field is the nodes name, and the second is the missing field. + """ + missing_fields: list[Tuple[str, str]] = [] + for node in nodes: + missing_fields.extend( + [ + (str(node.name), str(field)) + for field in required_fields + if field not in node.fields.keys() + ] + ) + return missing_fields + + +def create_missing_fields_error_messages( + missing_fields: list[Tuple[str, str]] +) -> list[str]: + """Creates the error message for when a node is missing a required field + + Args: + missing_fields (list[Tuple[str, str]]): A list of tuples of nodes with missing fields + The first item is the node + The second item is the missing field + + Returns: + list[str]: The error message + """ + errors: list[str] = [] + for missing_field in missing_fields: + errors.append( + ( + f"For entry: {missing_field[0]}, " + f"the required field {missing_field[1]} " + "is missing in the data model graph, please double check your model and " + "generate the graph again." + ) + ) + return errors + + +def check_characters_in_node_display_name( + nodes: list[Node], blacklisted_characters: list[str] +) -> list[str]: + """Checks each node 'displayName' field has no blacklisted characters + + Args: + nodes (list[Node]): A list of nodes. + blacklisted_characters (list[str]): A list of characters not allowed in the node + display name + + Raises: + ValueError: Any node is missing the 'displayName' field + + Returns: + list[str]: A list of warning messages + """ + warnings: list[str] = [] + for node in nodes: + node_display_name = node.display_name + + blacklisted_characters_found = [ + character + for character in node_display_name + if character in blacklisted_characters ] - if reserved_names_found: - for reserved_name, node_name in reserved_names_found: - error.append( - ( - f"Your data model entry name: {node_name} overlaps with the reserved name: " - f"{reserved_name}. Please change this name in your data model." - ) + + if blacklisted_characters_found: + warnings.append( + create_blacklisted_characters_error_message( + blacklisted_characters_found, node_display_name ) - return error + ) + return warnings - def check_namespace_overlap(self) -> list: - """ - Check if name is repeated. 
- Implement in the future - """ - warning: list = [] - return warning - def check_for_orphan_attributes(self) -> list: - """ - Check if attribute is specified but not connected to another attribute or component. - Implement in future - """ - warning: list = [] - return warning +def create_blacklisted_characters_error_message( + blacklisted_characters: list[str], node_name: str +) -> str: + """Creates am error message for the presence of blacklisted characters - def check_namespace_similarity(self) -> list: - """ - Using AI, check if submitted attributes or valid values are similar to other ones, - warn users. - Implement in future - """ - warning: list = [] - return warning + Args: + blacklisted_characters (list[str]): A list of characters that + are unallowed in certain node field names + node_name (str): The name of the node with the blacklisted characters + + Returns: + str: _description_ + """ + blacklisted_characters_str = ",".join(blacklisted_characters) + return ( + f"Node: {node_name} contains a blacklisted character(s): " + f"{blacklisted_characters_str}, they will be striped if used in " + "Synapse annotations." + ) + + +def match_node_names_with_reserved_names( + node_names: Iterable, reserved_names: Iterable[str] +) -> list[Tuple[str, str]]: + """Matches node names with those from a reserved list + + Args: + node_names (Iterable): An iterable of node names + reserved_names (Iterable[str]): A list of names to match with the node names + + Returns: + list[Tuple[str, str]]: A List of tuples where the node name matches a reserved name + The first item is the reserved name + The second item is the node name + """ + node_name_strings = [str(name) for name in node_names] + node_name_product = itertools.product(reserved_names, node_name_strings) + reserved_names_found = [ + node for node in node_name_product if node[0].lower() == node[1].lower() + ] + return reserved_names_found + + +def create_reserve_name_error_messages( + reserved_names_found: list[Tuple[str, str]] +) -> list[str]: + """Creates the error messages when a reserved name is used + + Args: + reserved_names_found (list[Tuple[str, str]]): A list of tuples + The first item is the reserved name + The second item is the node name that overlapped with a reserved name + + Returns: + list[str]: A list of error messages + """ + return [ + ( + f"Your data model entry name: {node_name} overlaps with the reserved name: " + f"{reserved_name}. Please change this name in your data model." + ) + for reserved_name, node_name in reserved_names_found + ] diff --git a/schematic/store/__init__.py b/schematic/store/__init__.py index 60d8d876a..e69de29bb 100644 --- a/schematic/store/__init__.py +++ b/schematic/store/__init__.py @@ -1,2 +0,0 @@ -from schematic.store.base import BaseStorage -from schematic.store.synapse import SynapseStorage diff --git a/schematic/store/database/README.md b/schematic/store/database/README.md new file mode 100644 index 000000000..347f0cafd --- /dev/null +++ b/schematic/store/database/README.md @@ -0,0 +1,18 @@ +The python scripts stored here are sourced from . +This logic was extracted out of `schematic_db` as there were a new of required +dependency updates that prevented using the updated `schematic_db` code. 
Those +dependency updates included: + +- Great expectations +- Pydantic +- tenacity +- Discontinuing python 3.9 + +As such the following considerations were made: + +- Extract the required functionality out of `schematic_db` such that `schematic` can +continue to function with the current dependencies, but, updates to the dependent code +may still occur. +- Functionality that exists within this extracted code should be split between +application (schematic) specific business logic, and core (SYNPY) logic. This will start +to come to fruition with SYNPY-1418 where table functionality is going to be expanded. diff --git a/schematic/store/database/synapse_database.py b/schematic/store/database/synapse_database.py new file mode 100644 index 000000000..9f61f94ee --- /dev/null +++ b/schematic/store/database/synapse_database.py @@ -0,0 +1,138 @@ +"""SynapseDatabase""" + +import pandas as pd +import synapseclient as sc # type: ignore + +from schematic.store.database.synapse_database_wrapper import Synapse +from schematic.store.synapse_tracker import SynapseEntityTracker + + +class SynapseDatabaseMissingTableAnnotationsError(Exception): + """Raised when a table is missing expected annotations""" + + def __init__(self, message: str, table_name: str) -> None: + self.message = message + self.table_name = table_name + super().__init__(self.message) + + def __str__(self) -> str: + return f"{self.message}; " f"name: {self.table_name};" + + +class InputDataframeMissingColumn(Exception): + """Raised when an input dataframe is missing a needed column(s)""" + + def __init__( + self, message: str, table_columns: list[str], missing_columns: list[str] + ) -> None: + self.message = message + self.table_columns = table_columns + self.missing_columns = missing_columns + super().__init__(self.message) + + def __str__(self) -> str: + return ( + f"{self.message}; " + f"table_columns: {self.table_columns}; " + f"missing_columns: {self.missing_columns}" + ) + + +class SynapseDatabase: + """Represents a database stored as Synapse tables""" + + def __init__( + self, + auth_token: str, + project_id: str, + synapse_entity_tracker: SynapseEntityTracker = None, + syn: sc.Synapse = None, + ) -> None: + """Init + + Args: + auth_token (str): A Synapse auth_token + project_id (str): A Synapse id for a project + synapse_entity_tracker: Tracker for a pull-through cache of Synapse entities + """ + self.synapse = Synapse( + auth_token=auth_token, + project_id=project_id, + synapse_entity_tracker=synapse_entity_tracker, + syn=syn, + ) + + def upsert_table_rows(self, table_name: str, data: pd.DataFrame) -> None: + """Upserts rows into the given table + + Args: + table_name (str): The name of the table to be upserted into. + data (pd.DataFrame): The table the rows will come from + + Raises: + SynapseDatabaseMissingTableAnnotationsError: Raised when the table has no + primary key annotation. + """ + table_id = self.synapse.get_synapse_id_from_table_name(table_name) + annotations = self.synapse.get_entity_annotations(table_id) + if "primary_key" not in annotations: + raise SynapseDatabaseMissingTableAnnotationsError( + "Table has no primary_key annotation", table_name + ) + primary_key = annotations["primary_key"][0] + self._upsert_table_rows(table_id, data, primary_key) + + def _upsert_table_rows( + self, table_id: str, data: pd.DataFrame, primary_key: str + ) -> None: + """Upserts rows into the given table + + Args: + table_id (str): The Synapse id of the table to be upserted into. 
+ data (pd.DataFrame): The table the rows will come from + primary_key (str): The primary key of the table used to identify + which rows to update + + Raises: + InputDataframeMissingColumn: Raised when the input dataframe has + no column that matches the primary key argument. + """ + if primary_key not in list(data.columns): + raise InputDataframeMissingColumn( + "Input dataframe missing primary key column.", + list(data.columns), + [primary_key], + ) + + table = self._create_primary_key_table(table_id, primary_key) + merged_table = pd.merge( + data, table, how="left", on=primary_key, validate="one_to_one" + ) + self.synapse.upsert_table_rows(table_id, merged_table) + + def _create_primary_key_table( + self, table_id: str, primary_key: str + ) -> pd.DataFrame: + """Creates a dataframe with just the primary key of the table + + Args: + table_id (str): The id of the table to query + primary_key (str): The name of the primary key + + Returns: + pd.DataFrame: The table in pandas.DataFrame form with the primary key, ROW_ID, and + ROW_VERSION columns + + Raises: + InputDataframeMissingColumn: Raised when the synapse table has no column that + matches the primary key argument. + """ + table = self.synapse.query_table(table_id, include_row_data=True) + if primary_key not in list(table.columns): + raise InputDataframeMissingColumn( + "Synapse table missing primary key column", + list(table.columns), + [primary_key], + ) + table = table[["ROW_ID", "ROW_VERSION", primary_key]] + return table diff --git a/schematic/store/database/synapse_database_wrapper.py b/schematic/store/database/synapse_database_wrapper.py new file mode 100644 index 000000000..b827b140f --- /dev/null +++ b/schematic/store/database/synapse_database_wrapper.py @@ -0,0 +1,160 @@ +"""Wrapper class for interacting with Synapse database objects. Eventually this will +be replaced with a more database/table class that exists within the SYNPY project.""" + +from typing import Optional + +import pandas # type: ignore +import synapseclient # type: ignore +from opentelemetry import trace + +from schematic.store.synapse_tracker import SynapseEntityTracker + + +class SynapseTableNameError(Exception): + """SynapseTableNameError""" + + def __init__(self, message: str, table_name: str) -> None: + """ + Args: + message (str): A message describing the error + table_name (str): The name of the table + """ + self.message = message + self.table_name = table_name + super().__init__(self.message) + + def __str__(self) -> str: + return f"{self.message}:{self.table_name}" + + +class Synapse: # pylint: disable=too-many-public-methods + """ + The Synapse class handles interactions with a project in Synapse. 
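`_upsert_table_rows` above left-merges the incoming data against the table's primary-key column so that existing `ROW_ID`/`ROW_VERSION` values ride along into the store call, while unmatched rows become inserts. A standalone pandas sketch of that merge (the sample frames and `patient_id` key are made up for illustration):

```python
import pandas as pd

data = pd.DataFrame({"patient_id": ["p1", "p2"], "age": [40, 55]})
existing = pd.DataFrame({"ROW_ID": [1], "ROW_VERSION": [3], "patient_id": ["p1"]})

# Rows matching an existing primary key keep their ROW_ID/ROW_VERSION (update);
# rows with NaN in those columns are treated as new (insert).
merged = pd.merge(data, existing, how="left", on="patient_id", validate="one_to_one")
print(merged)
```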
+ """ + + def __init__( + self, + auth_token: str, + project_id: str, + cache_root_dir: Optional[str] = None, + synapse_entity_tracker: SynapseEntityTracker = None, + syn: synapseclient.Synapse = None, + ) -> None: + """Init + + Args: + auth_token (str): A Synapse auth_token + project_id (str): A Synapse id for a project + cache_root_dir( str ): Where the directory of the synapse cache should be located + synapse_entity_tracker: Tracker for a pull-through cache of Synapse entities + """ + self.project_id = project_id + if syn: + self.syn = syn + else: + syn = synapseclient.Synapse(cache_root_dir=cache_root_dir) + syn.login(authToken=auth_token, silent=True) + current_span = trace.get_current_span() + if current_span.is_recording(): + current_span.set_attribute("user.id", syn.credentials.owner_id) + self.syn = syn + self.synapse_entity_tracker = synapse_entity_tracker or SynapseEntityTracker() + + def get_synapse_id_from_table_name(self, table_name: str) -> str: + """Gets the synapse id from the table name + + Args: + table_name (str): The name of the table + + Raises: + SynapseTableNameError: When no tables match the name + SynapseTableNameError: When multiple tables match the name + + Returns: + str: A synapse id + """ + matching_table_id = self.syn.findEntityId( + name=table_name, parent=self.project_id + ) + if matching_table_id is None: + raise SynapseTableNameError("No matching tables with name:", table_name) + return matching_table_id + + def query_table( + self, synapse_id: str, include_row_data: bool = False + ) -> pandas.DataFrame: + """Queries a whole table + + Args: + synapse_id (str): The Synapse id of the table to delete + include_row_data (bool): Include row_id and row_etag. Defaults to False. + + Returns: + pandas.DataFrame: The queried table + """ + query = f"SELECT * FROM {synapse_id}" + return self.execute_sql_query(query, include_row_data) + + def execute_sql_query( + self, query: str, include_row_data: bool = False + ) -> pandas.DataFrame: + """Execute a Sql query + + Args: + query (str): A SQL statement that can be run by Synapse + include_row_data (bool): Include row_id and row_etag. Defaults to False. + + Returns: + pandas.DataFrame: The queried table + """ + result = self.execute_sql_statement(query, include_row_data) + table = pandas.read_csv(result.filepath) + return table + + def execute_sql_statement( + self, statement: str, include_row_data: bool = False + ) -> synapseclient.table.CsvFileTable: + """Execute a SQL statement + + Args: + statement (str): A SQL statement that can be run by Synapse + include_row_data (bool): Include row_id and row_etag. Defaults to False. 
+ + Returns: + synapseclient.table.CsvFileTable: The synapse table result from + the provided statement + """ + table = self.syn.tableQuery( + statement, includeRowIdAndRowVersion=include_row_data + ) + assert isinstance(table, synapseclient.table.CsvFileTable) + return table + + def upsert_table_rows(self, synapse_id: str, data: pandas.DataFrame) -> None: + """Upserts rows from the given table + + Args: + synapse_id (str): The Synapse ID fo the table to be upserted into + data (pandas.DataFrame): The table the rows will come from + """ + self.syn.store(synapseclient.Table(synapse_id, data)) + # Commented out until https://sagebionetworks.jira.com/browse/PLFM-8605 is resolved + # storage_result = self.syn.store(synapseclient.Table(synapse_id, data)) + # self.synapse_entity_tracker.add(synapse_id=storage_result.schema.id, entity=storage_result.schema) + self.synapse_entity_tracker.remove(synapse_id=synapse_id) + + def get_entity_annotations(self, synapse_id: str) -> synapseclient.Annotations: + """Gets the annotations for the Synapse entity + + Args: + synapse_id (str): The Synapse id of the entity + + Returns: + synapseclient.Annotations: The annotations of the Synapse entity in dict form. + """ + entity = self.synapse_entity_tracker.get( + synapse_id=synapse_id, syn=self.syn, download_file=False + ) + return synapseclient.Annotations( + id=entity.id, etag=entity.etag, values=entity.annotations + ) diff --git a/schematic/store/synapse.py b/schematic/store/synapse.py index 861789374..7ccd98362 100644 --- a/schematic/store/synapse.py +++ b/schematic/store/synapse.py @@ -7,9 +7,10 @@ import re import secrets import shutil +import time import uuid # used to generate unique names for entities from copy import deepcopy -from dataclasses import asdict, dataclass +from dataclasses import dataclass, field from time import sleep # allows specifying explicit variable types @@ -20,7 +21,7 @@ import synapseclient import synapseutils from opentelemetry import trace -from schematic_db.rdb.synapse_database import SynapseDatabase +from synapseclient import Annotations as OldAnnotations from synapseclient import ( Column, Entity, @@ -33,14 +34,14 @@ Table, as_table_columns, ) -from synapseclient.api import get_entity_id_bundle2 +from synapseclient.annotations import _convert_to_annotations_list +from synapseclient.api import get_config_file, get_entity_id_bundle2 from synapseclient.core.constants.concrete_types import PROJECT_ENTITY from synapseclient.core.exceptions import ( SynapseAuthenticationError, SynapseHTTPError, SynapseUnmetAccessRestrictions, ) -from synapseclient.entity import File from synapseclient.models.annotations import Annotations from synapseclient.table import CsvFileTable, Schema, build_table from tenacity import ( @@ -55,6 +56,8 @@ from schematic.exceptions import AccessCredentialsError from schematic.schemas.data_model_graph import DataModelGraphExplorer from schematic.store.base import BaseStorage +from schematic.store.database.synapse_database import SynapseDatabase +from schematic.store.synapse_tracker import SynapseEntityTracker from schematic.utils.df_utils import col_in_dataframe, load_df, update_df # entity_type_mapping, get_dir_size, create_temp_folder, check_synapse_cache_size, and clear_synapse_cache functions are used for AWS deployment @@ -66,6 +69,7 @@ entity_type_mapping, get_dir_size, ) +from schematic.utils.io_utils import cleanup_temporary_storage from schematic.utils.schema_utils import get_class_label_from_display_name from schematic.utils.validate_utils import 
comma_separated_list_regex, rule_in_rule_list @@ -79,35 +83,107 @@ class ManifestDownload(object): """ syn: an object of type synapseclient. manifest_id: id of a manifest + synapse_entity_tracker: Tracker for a pull-through cache of Synapse entities """ syn: synapseclient.Synapse manifest_id: str + synapse_entity_tracker: SynapseEntityTracker = field( + default_factory=SynapseEntityTracker + ) - def _download_manifest_to_folder(self) -> File: + def _download_manifest_to_folder(self, use_temporary_folder: bool = True) -> File: """ - try downloading a manifest to local cache or a given folder - manifest + Try downloading a manifest to a specific folder (temporary or not). When the + `use_temporary_folder` is set to True, the manifest will be downloaded to a + temporary folder. This is useful for when the code is running as an API server + where multiple requests are being made at the same time. This will prevent + multiple requests from overwriting the same manifest file. When the + `use_temporary_folder` is set to False, the manifest will be downloaded to the + default manifest folder. + + Args: + use_temporary_folder: boolean argument indicating if a temporary folder + should be used to store the manifest file. This is useful when running + this code as an API server where multiple requests could be made at the + same time. This is set to False when the code is being used from the + CLI. Defaults to True. + Return: manifest_data: A Synapse file entity of the downloaded manifest """ + manifest_data = self.synapse_entity_tracker.get( + synapse_id=self.manifest_id, + syn=self.syn, + download_file=False, + retrieve_if_not_present=False, + ) + current_span = trace.get_current_span() + if ( + manifest_data + and (file_handle := manifest_data.get("_file_handle", None)) + and current_span.is_recording() + ): + current_span.set_attribute( + "schematic.manifest_size", file_handle.get("contentSize", 0) + ) + + if manifest_data and manifest_data.path: + return manifest_data + if "SECRETS_MANAGER_SECRETS" in os.environ: temporary_manifest_storage = "/var/tmp/temp_manifest_download" - # clear out all the existing manifests - if os.path.exists(temporary_manifest_storage): - shutil.rmtree(temporary_manifest_storage) + cleanup_temporary_storage( + temporary_manifest_storage, time_delta_seconds=3600 + ) # create a new directory to store manifest if not os.path.exists(temporary_manifest_storage): os.mkdir(temporary_manifest_storage) # create temporary folders for storing manifests - download_location = create_temp_folder(temporary_manifest_storage) + download_location = create_temp_folder( + path=temporary_manifest_storage, + prefix=f"{self.manifest_id}-{time.time()}-", + ) else: - download_location = CONFIG.manifest_folder - manifest_data = self.syn.get( - self.manifest_id, - downloadLocation=download_location, - ifcollision="overwrite.local", + if use_temporary_folder: + download_location = create_temp_folder( + path=CONFIG.manifest_folder, + prefix=f"{self.manifest_id}-{time.time()}-", + ) + else: + download_location = CONFIG.manifest_folder + + manifest_data = self.synapse_entity_tracker.get( + synapse_id=self.manifest_id, + syn=self.syn, + download_file=True, + retrieve_if_not_present=True, + download_location=download_location, ) + + # This is doing a rename of the downloaded file. The reason this is important + # is that if we are re-using a file that was previously downloaded, but the + # file had been renamed. 
The file downloaded from the Synapse client is just + # a direct copy of that renamed file. This code will set the name of the file + # to the original name that was used to download the file. Note: An MD5 checksum + # of the file will still be performed so if the file has changed, it will be + # downloaded again. + filename = manifest_data._file_handle.fileName + if filename != os.path.basename(manifest_data.path): + parent_folder = os.path.dirname(manifest_data.path) + manifest_original_name_and_path = os.path.join(parent_folder, filename) + + self.syn.cache.remove( + file_handle_id=manifest_data.dataFileHandleId, path=manifest_data.path + ) + os.rename(manifest_data.path, manifest_original_name_and_path) + manifest_data.path = manifest_original_name_and_path + self.syn.cache.add( + file_handle_id=manifest_data.dataFileHandleId, + path=manifest_original_name_and_path, + md5=manifest_data._file_handle.contentMd5, + ) + return manifest_data def _entity_type_checking(self) -> str: @@ -117,15 +193,21 @@ def _entity_type_checking(self) -> str: if the entity type is wrong, raise an error """ # check the type of entity - entity_type = entity_type_mapping(self.syn, self.manifest_id) + entity_type = entity_type_mapping( + syn=self.syn, + entity_id=self.manifest_id, + synapse_entity_tracker=self.synapse_entity_tracker, + ) if entity_type != "file": logger.error( f"You are using entity type: {entity_type}. Please provide a file ID" ) - @staticmethod def download_manifest( - self, newManifestName: str = "", manifest_df: pd.DataFrame = pd.DataFrame() + self, + newManifestName: str = "", + manifest_df: pd.DataFrame = pd.DataFrame(), + use_temporary_folder: bool = True, ) -> Union[str, File]: """ Download a manifest based on a given manifest id. @@ -145,7 +227,9 @@ def download_manifest( # download a manifest try: - manifest_data = self._download_manifest_to_folder() + manifest_data = self._download_manifest_to_folder( + use_temporary_folder=use_temporary_folder + ) except (SynapseUnmetAccessRestrictions, SynapseAuthenticationError): # if there's an error getting an uncensored manifest, try getting the censored manifest if not manifest_df.empty: @@ -154,7 +238,9 @@ def download_manifest( new_manifest_id = manifest_df[censored]["id"][0] self.manifest_id = new_manifest_id try: - manifest_data = self._download_manifest_to_folder() + manifest_data = self._download_manifest_to_folder( + use_temporary_folder=use_temporary_folder + ) except ( SynapseUnmetAccessRestrictions, SynapseAuthenticationError, @@ -175,12 +261,25 @@ def download_manifest( parent_folder = os.path.dirname(manifest_data.get("path")) new_manifest_path_name = os.path.join(parent_folder, new_manifest_filename) - os.rename(manifest_data["path"], new_manifest_path_name) + + # Copy file to new location. The purpose of using a copy instead of a rename + # is to avoid any potential issues with the file being used in another + # process. This avoids any potential race or code cocurrency conditions. + shutil.copyfile(src=manifest_data["path"], dst=new_manifest_path_name) + + # Adding this to cache will allow us to re-use the already downloaded + # manifest file for up to 1 hour. 
+ self.syn.cache.add( + file_handle_id=manifest_data.dataFileHandleId, + path=new_manifest_path_name, + md5=manifest_data._file_handle.contentMd5, + ) # Update file names/paths in manifest_data manifest_data["name"] = new_manifest_filename manifest_data["filename"] = new_manifest_filename manifest_data["path"] = new_manifest_path_name + return manifest_data @@ -219,13 +318,22 @@ def __init__( Consider necessity of adding "columns" and "where_clauses" params to the constructor. Currently with how `query_fileview` is implemented, these params are not needed at this step but could be useful in the future if the need for more scoped querys expands. """ self.syn = self.login(synapse_cache_path, access_token) + current_span = trace.get_current_span() + if current_span.is_recording(): + current_span.set_attribute("user.id", self.syn.credentials.owner_id) self.project_scope = project_scope self.storageFileview = CONFIG.synapse_master_fileview_id self.manifest = CONFIG.synapse_manifest_basename self.root_synapse_cache = self.syn.cache.cache_root_dir + self.synapse_entity_tracker = SynapseEntityTracker() if perform_query: self.query_fileview(columns=columns, where_clauses=where_clauses) + # TODO: When moving this over to a regular cron-job the following logic should be + # out of `manifest_download`: + # if "SECRETS_MANAGER_SECRETS" in os.environ: + # temporary_manifest_storage = "/var/tmp/temp_manifest_download" + # cleanup_temporary_storage(temporary_manifest_storage, time_delta_seconds=3600) @tracer.start_as_current_span("SynapseStorage::_purge_synapse_cache") def _purge_synapse_cache( self, maximum_storage_allowed_cache_gb: int = 1, minute_buffer: int = 15 @@ -263,6 +371,7 @@ def query_fileview( self, columns: Optional[list] = None, where_clauses: Optional[list] = None, + force_requery: Optional[bool] = False, ) -> None: """ Method to query the Synapse FileView and store the results in a pandas DataFrame. The results are stored in the storageFileviewTable attribute. @@ -270,12 +379,10 @@ def query_fileview( Args: columns (Optional[list], optional): List of columns to be selected from the table. Defaults behavior is to request all columns. where_clauses (Optional[list], optional): List of where clauses to be used to scope the query. Defaults to None. + force_requery (Optional[bool], optional): If True, forces a requery of the fileview. Defaults to False. """ self._purge_synapse_cache() - self.storageFileview = CONFIG.synapse_master_fileview_id - self.manifest = CONFIG.synapse_manifest_basename - # Initialize to assume that the new fileview query will be different from what may already be stored. 
Initializes to True because generally one will not have already been performed self.new_query_different = True @@ -289,8 +396,8 @@ def query_fileview( if previous_query_built: self.new_query_different = self.fileview_query != previous_query - # Only perform the query if it is different from the previous query - if self.new_query_different: + # Only perform the query if it is different from the previous query or we are forcing new results to be retrieved + if self.new_query_different or force_requery: try: self.storageFileviewTable = self.syn.tableQuery( query=self.fileview_query, @@ -348,6 +455,7 @@ def _build_query( return @staticmethod + @tracer.start_as_current_span("SynapseStorage::login") def login( synapse_cache_path: Optional[str] = None, access_token: Optional[str] = None, @@ -371,8 +479,16 @@ def login( # login using a token if access_token: try: - syn = synapseclient.Synapse(cache_root_dir=synapse_cache_path) + syn = synapseclient.Synapse( + cache_root_dir=synapse_cache_path, + debug=False, + skip_checks=True, + cache_client=False, + ) syn.login(authToken=access_token, silent=True) + current_span = trace.get_current_span() + if current_span.is_recording(): + current_span.set_attribute("user.id", syn.credentials.owner_id) except SynapseHTTPError as exc: raise ValueError( "No access to resources. Please make sure that your token is correct" @@ -382,8 +498,14 @@ def login( syn = synapseclient.Synapse( configPath=CONFIG.synapse_configuration_path, cache_root_dir=synapse_cache_path, + debug=False, + skip_checks=True, + cache_client=False, ) syn.login(silent=True) + current_span = trace.get_current_span() + if current_span.is_recording(): + current_span.set_attribute("user.id", syn.credentials.owner_id) return syn def missing_entity_handler(method): @@ -451,6 +573,7 @@ def getPaginatedRestResults(self, currentUserId: str) -> Dict[str, str]: return all_results + @tracer.start_as_current_span("SynapseStorage::getStorageProjects") def getStorageProjects(self, project_scope: List = None) -> list[tuple[str, str]]: """Gets all storage projects the current user has access to, within the scope of the 'storageFileview' attribute. 
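`query_fileview` now skips re-querying Synapse when the freshly built query matches the previous one, unless `force_requery` is set. A hedged sketch of that memoization logic, with the actual query runner replaced by a stub:

```python
from typing import Optional


class FileviewCache:
    """Illustrative only: re-run the query when it changes or when forced."""

    def __init__(self) -> None:
        self.last_query: Optional[str] = None
        self.results: Optional[list] = None

    def query(self, fileview_query: str, force_requery: bool = False) -> list:
        query_changed = fileview_query != self.last_query
        if query_changed or force_requery or self.results is None:
            self.results = self._run(fileview_query)  # stand-in for syn.tableQuery
            self.last_query = fileview_query
        return self.results

    def _run(self, query: str) -> list:
        print(f"running: {query}")
        return ["row1", "row2"]


cache = FileviewCache()
cache.query("SELECT * FROM syn123")                      # runs the query
cache.query("SELECT * FROM syn123")                      # served from memory
cache.query("SELECT * FROM syn123", force_requery=True)  # runs again
```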
@@ -462,23 +585,20 @@ def getStorageProjects(self, project_scope: List = None) -> list[tuple[str, str] storageProjects = self.storageFileviewTable["projectId"].unique() # get the set of storage Synapse project accessible for this user - - # get current user name and user ID - currentUser = self.syn.getUserProfile() - currentUserName = currentUser.userName - currentUserId = currentUser.ownerId - # get a list of projects from Synapse - currentUserProjects = self.getPaginatedRestResults(currentUserId) - - # prune results json filtering project id - currentUserProjects = [ - currentUserProject.get("id") - for currentUserProject in currentUserProjects["results"] - ] + current_user_project_headers = self.synapse_entity_tracker.get_project_headers( + current_user_id=self.syn.credentials.owner_id, syn=self.syn + ) + project_id_to_name_dict = {} + current_user_projects = [] + for project_header in current_user_project_headers: + project_id_to_name_dict[project_header.get("id")] = project_header.get( + "name" + ) + current_user_projects.append(project_header.get("id")) # find set of user projects that are also in this pipeline's storage projects set - storageProjects = list(set(storageProjects) & set(currentUserProjects)) + storageProjects = list(set(storageProjects) & set(current_user_projects)) # Limit projects to scope if specified if project_scope: @@ -492,8 +612,8 @@ def getStorageProjects(self, project_scope: List = None) -> list[tuple[str, str] # prepare a return list of project IDs and names projects = [] for projectId in storageProjects: - projectName = self.syn.get(projectId, downloadFile=False).name - projects.append((projectId, projectName)) + project_name_from_project_header = project_id_to_name_dict.get(projectId) + projects.append((projectId, project_name_from_project_header)) sorted_projects_list = sorted(projects, key=lambda tup: tup[0]) @@ -513,13 +633,11 @@ def getStorageDatasetsInProject(self, projectId: str) -> list[tuple[str, str]]: # select all folders and fetch their names from within the storage project; # if folder content type is defined, only select folders that contain datasets - areDatasets = False if "contentType" in self.storageFileviewTable.columns: foldersTable = self.storageFileviewTable[ (self.storageFileviewTable["contentType"] == "dataset") & (self.storageFileviewTable["projectId"] == projectId) ] - areDatasets = True else: foldersTable = self.storageFileviewTable[ (self.storageFileviewTable["type"] == "folder") @@ -568,7 +686,9 @@ def getFilesInStorageDataset( self.syn, datasetId, includeTypes=["folder", "file"] ) - current_entity_location = self.syn.get(entity=datasetId, downloadFile=False) + current_entity_location = self.synapse_entity_tracker.get( + synapse_id=datasetId, syn=self.syn, download_file=False + ) def walk_back_to_project( current_location: Entity, location_prefix: str, skip_entry: bool @@ -605,8 +725,13 @@ def walk_back_to_project( and current_location["concreteType"] == PROJECT_ENTITY ): return updated_prefix + current_location = self.synapse_entity_tracker.get( + synapse_id=current_location["parentId"], + syn=self.syn, + download_file=False, + ) return walk_back_to_project( - current_location=self.syn.get(entity=current_location["parentId"]), + current_location=current_location, location_prefix=updated_prefix, skip_entry=False, ) @@ -617,8 +742,11 @@ def walk_back_to_project( skip_entry=True, ) - project = self.getDatasetProject(datasetId) - project_name = self.syn.get(project, downloadFile=False).name + project_id = 
self.getDatasetProject(datasetId) + project = self.synapse_entity_tracker.get( + synapse_id=project_id, syn=self.syn, download_file=False + ) + project_name = project.name file_list = [] # iterate over all results @@ -685,6 +813,7 @@ def getDatasetManifest( datasetId: str, downloadFile: bool = False, newManifestName: str = "", + use_temporary_folder: bool = True, ) -> Union[str, File]: """Gets the manifest associated with a given dataset. @@ -692,6 +821,11 @@ def getDatasetManifest( datasetId: synapse ID of a storage dataset. downloadFile: boolean argument indicating if manifest file in dataset should be downloaded or not. newManifestName: new name of a manifest that gets downloaded + use_temporary_folder: boolean argument indicating if a temporary folder + should be used to store the manifest file. This is useful when running + this code as an API server where multiple requests could be made at the + same time. This is set to False when the code is being used from the + CLI. Defaults to True. Returns: manifest_syn_id (String): Synapse ID of exisiting manifest file. @@ -726,9 +860,15 @@ def getDatasetManifest( else: manifest_syn_id = self._get_manifest_id(manifest) if downloadFile: - md = ManifestDownload(self.syn, manifest_id=manifest_syn_id) - manifest_data = ManifestDownload.download_manifest( - md, newManifestName=newManifestName, manifest_df=manifest + md = ManifestDownload( + self.syn, + manifest_id=manifest_syn_id, + synapse_entity_tracker=self.synapse_entity_tracker, + ) + manifest_data = md.download_manifest( + newManifestName=newManifestName, + manifest_df=manifest, + use_temporary_folder=use_temporary_folder, ) # TO DO: revisit how downstream code handle manifest_data. If the downstream code would break when manifest_data is an empty string, # then we should catch the error here without returning an empty string. 
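Several of the changes above route entity reads through `SynapseEntityTracker`, a pull-through cache: return the in-memory copy when present, otherwise fetch it and remember it, and invalidate after writes. A minimal generic sketch of that caching idea (not the tracker's real implementation):

```python
from typing import Any, Callable, Optional


class PullThroughCache:
    """Illustrative pull-through cache: fetch on miss, then serve from memory."""

    def __init__(self, fetch: Callable[[str], Any]) -> None:
        self._fetch = fetch
        self._entities: dict[str, Any] = {}

    def get(self, synapse_id: str, retrieve_if_not_present: bool = True) -> Optional[Any]:
        if synapse_id in self._entities:
            return self._entities[synapse_id]
        if not retrieve_if_not_present:
            return None
        entity = self._fetch(synapse_id)
        self._entities[synapse_id] = entity
        return entity

    def remove(self, synapse_id: str) -> None:
        # Invalidate after a write so the next read re-fetches fresh state
        self._entities.pop(synapse_id, None)


cache = PullThroughCache(fetch=lambda syn_id: {"id": syn_id, "name": "example"})
print(cache.get("syn123"))  # fetches once
print(cache.get("syn123"))  # cached copy
cache.remove("syn123")      # e.g. after a table upsert
```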
@@ -745,7 +885,10 @@ def getDataTypeFromManifest(self, manifestId: str): manifestId: synapse ID of a manifest """ # get manifest file path - manifest_filepath = self.syn.get(manifestId).path + manifest_entity = self.synapse_entity_tracker.get( + synapse_id=manifestId, syn=self.syn, download_file=True + ) + manifest_filepath = manifest_entity.path # load manifest dataframe manifest = load_df( @@ -923,7 +1066,11 @@ def updateDatasetManifestFiles( if not manifest_id: return None - manifest_filepath = self.syn.get(manifest_id).path + manifest_entity = self.synapse_entity_tracker.get( + synapse_id=manifest_id, syn=self.syn, download_file=True + ) + manifest_filepath = manifest_entity.path + manifest = load_df(manifest_filepath) manifest_is_file_based = "Filename" in manifest.columns @@ -1024,7 +1171,9 @@ def getProjectManifests( # If manifest has annotations specifying component, use that if annotations and "Component" in annotations: component = annotations["Component"] - entity = self.syn.get(manifestId, downloadFile=False) + entity = self.synapse_entity_tracker.get( + synapse_id=manifestId, syn=self.syn, download_file=False + ) manifest_name = entity["properties"]["name"] # otherwise download the manifest and parse for information @@ -1174,7 +1323,7 @@ def upload_annotated_project_manifests_to_synapse( ("", ""), ) if not dry_run: - manifest_syn_id = self.associateMetadataWithFiles( + self.associateMetadataWithFiles( dmge, manifest_path, datasetId, manifest_record_type="table" ) manifest_loaded.append(manifest) @@ -1226,7 +1375,10 @@ def move_entities_to_new_project( if returnEntities: for entityId in annotation_entities: if not dry_run: - self.syn.move(entityId, datasetId) + moved_entity = self.syn.move(entityId, datasetId) + self.synapse_entity_tracker.add( + synapse_id=moved_entity.id, entity=moved_entity + ) else: logging.info( f"{entityId} will be moved to folder {datasetId}." @@ -1237,6 +1389,10 @@ def move_entities_to_new_project( projectId + "_archive", parent=newProjectId ) archive_project_folder = self.syn.store(archive_project_folder) + self.synapse_entity_tracker.add( + synapse_id=archive_project_folder.id, + entity=archive_project_folder, + ) # generate dataset folder dataset_archive_folder = Folder( @@ -1244,11 +1400,20 @@ def move_entities_to_new_project( parent=archive_project_folder.id, ) dataset_archive_folder = self.syn.store(dataset_archive_folder) + self.synapse_entity_tracker.add( + synapse_id=dataset_archive_folder.id, + entity=dataset_archive_folder, + ) for entityId in annotation_entities: # move entities to folder if not dry_run: - self.syn.move(entityId, dataset_archive_folder.id) + moved_entity = self.syn.move( + entityId, dataset_archive_folder.id + ) + self.synapse_entity_tracker.add( + synapse_id=moved_entity.id, entity=moved_entity + ) else: logging.info( f"{entityId} will be moved to folder {dataset_archive_folder.id}." 
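In the `buildDB` changes that follow, the table lookup switches to `syn.findEntityId(name=..., parent=...)` and the result drives the choice between creating, replacing, upserting, or updating the table. A hedged sketch of that branch, with the table operations reduced to labels rather than schematic's `TableOperations`:

```python
from typing import Optional


def choose_table_operation(
    existing_table_id: Optional[str], table_manipulation: Optional[str]
) -> str:
    """Illustrative branch: create when absent, otherwise replace/upsert/update."""
    if not table_manipulation or existing_table_id is None:
        return "createTable"
    if table_manipulation.lower() == "replace":
        return "replaceTable"
    if table_manipulation.lower() == "upsert":
        return "upsertTable"
    return "updateTable"


# existing_table_id would come from syn.findEntityId(name=table_name, parent=project_id)
print(choose_table_operation(None, "replace"))     # createTable (nothing to replace yet)
print(choose_table_operation("syn456", "upsert"))  # upsertTable
```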
@@ -1272,27 +1437,6 @@ def get_synapse_table(self, synapse_id: str) -> Tuple[pd.DataFrame, CsvFileTable return df, results - @tracer.start_as_current_span("SynapseStorage::_get_tables") - def _get_tables(self, datasetId: str = None, projectId: str = None) -> List[Table]: - if projectId: - project = projectId - elif datasetId: - project = self.syn.get(self.getDatasetProject(datasetId)) - - return list(self.syn.getChildren(project, includeTypes=["table"])) - - def get_table_info(self, datasetId: str = None, projectId: str = None) -> List[str]: - """Gets the names of the tables in the schema - Can pass in a synID for a dataset or project - Returns: - list[str]: A list of table names - """ - tables = self._get_tables(datasetId=datasetId, projectId=projectId) - if tables: - return {table["name"]: table["id"] for table in tables} - else: - return {None: None} - @missing_entity_handler @tracer.start_as_current_span("SynapseStorage::uploadDB") def uploadDB( @@ -1432,34 +1576,27 @@ def buildDB( manifest_table_id: synID of the uploaded table """ - table_info = self.get_table_info(datasetId=datasetId) - # Put table manifest onto synapse - schema = Schema( - name=table_name, - columns=col_schema, - parent=self.getDatasetProject(datasetId), + table_parent_id = self.getDatasetProject(datasetId=datasetId) + existing_table_id = self.syn.findEntityId( + name=table_name, parent=table_parent_id ) - if table_name in table_info: - existingTableId = table_info[table_name] - else: - existingTableId = None - tableOps = TableOperations( synStore=self, tableToLoad=table_manifest, tableName=table_name, datasetId=datasetId, - existingTableId=existingTableId, + existingTableId=existing_table_id, restrict=restrict, + synapse_entity_tracker=self.synapse_entity_tracker, ) - if not table_manipulation or table_name not in table_info.keys(): + if not table_manipulation or existing_table_id is None: manifest_table_id = tableOps.createTable( columnTypeDict=col_schema, specifySchema=True, ) - elif table_name in table_info.keys() and table_info[table_name]: + elif existing_table_id is not None: if table_manipulation.lower() == "replace": manifest_table_id = tableOps.replaceTable( specifySchema=True, @@ -1473,11 +1610,20 @@ def buildDB( manifest_table_id = tableOps.updateTable() if table_manipulation and table_manipulation.lower() == "upsert": - existing_tables = self.get_table_info(datasetId=datasetId) - tableId = existing_tables[table_name] - annos = self.syn.get_annotations(tableId) + table_entity = self.synapse_entity_tracker.get( + synapse_id=existing_table_id or manifest_table_id, + syn=self.syn, + download_file=False, + ) + annos = OldAnnotations( + id=table_entity.id, + etag=table_entity.etag, + values=table_entity.annotations, + ) annos["primary_key"] = table_manifest["Component"][0] + "_id" annos = self.syn.set_annotations(annos) + table_entity.etag = annos.etag + table_entity.annotations = annos return manifest_table_id @@ -1517,24 +1663,89 @@ def upload_manifest_file( + file_extension ) - manifestSynapseFile = File( - metadataManifestPath, - description="Manifest for dataset " + datasetId, - parent=datasetId, - name=file_name_new, + manifest_synapse_file = None + try: + # Rename the file to file_name_new then revert + # This is to maintain the original file name in-case other code is + # expecting that the file exists with the original name + original_file_path = metadataManifestPath + new_file_path = os.path.join( + os.path.dirname(metadataManifestPath), file_name_new + ) + os.rename(original_file_path, 
new_file_path) + + manifest_synapse_file = self._store_file_for_manifest_upload( + new_file_path=new_file_path, + dataset_id=datasetId, + existing_file_name=file_name_full, + file_name_new=file_name_new, + restrict_manifest=restrict_manifest, + ) + manifest_synapse_file_id = manifest_synapse_file.id + + finally: + # Revert the file name back to the original + os.rename(new_file_path, original_file_path) + + if manifest_synapse_file: + manifest_synapse_file.path = original_file_path + + return manifest_synapse_file_id + + def _store_file_for_manifest_upload( + self, + new_file_path: str, + dataset_id: str, + existing_file_name: str, + file_name_new: str, + restrict_manifest: bool, + ) -> File: + """Handles a create or update of a manifest file that is going to be uploaded. + If we already have a copy of the Entity in memory we will update that instance, + otherwise create a new File instance to be created in Synapse. Once stored + this will add the file to the `synapse_entity_tracker` for future reference. + + Args: + new_file_path (str): The path to the new manifest file + dataset_id (str): The Synapse ID of the dataset the manifest is associated with + existing_file_name (str): The name of the existing file + file_name_new (str): The name of the new file + restrict_manifest (bool): Whether the manifest should be restricted + + Returns: + File: The stored manifest file + """ + local_tracked_file_instance = ( + self.synapse_entity_tracker.search_local_by_parent_and_name( + name=existing_file_name, parent_id=dataset_id + ) + or self.synapse_entity_tracker.search_local_by_parent_and_name( + name=file_name_new, parent_id=dataset_id + ) ) - manifest_synapse_file_id = self.syn.store( - manifestSynapseFile, isRestricted=restrict_manifest - ).id - synapseutils.copy_functions.changeFileMetaData( - syn=self.syn, - entity=manifest_synapse_file_id, - downloadAs=file_name_new, - forceVersion=False, + if local_tracked_file_instance: + local_tracked_file_instance.path = new_file_path + local_tracked_file_instance.description = ( + "Manifest for dataset " + dataset_id + ) + manifest_synapse_file = local_tracked_file_instance + else: + manifest_synapse_file = File( + path=new_file_path, + description="Manifest for dataset " + dataset_id, + parent=dataset_id, + name=file_name_new, + ) + + manifest_synapse_file = self.syn.store( + manifest_synapse_file, isRestricted=restrict_manifest ) - return manifest_synapse_file_id + self.synapse_entity_tracker.add( + synapse_id=manifest_synapse_file.id, entity=manifest_synapse_file + ) + return manifest_synapse_file async def get_async_annotation(self, synapse_id: str) -> Dict[str, Any]: """get annotations asynchronously @@ -1569,7 +1780,19 @@ async def store_async_annotation(self, annotation_dict: dict) -> Annotations: etag=annotation_dict["annotations"]["etag"], id=annotation_dict["annotations"]["id"], ) - return await annotation_class.store_async(synapse_client=self.syn) + annotation_storage_result = await annotation_class.store_async( + synapse_client=self.syn + ) + local_entity = self.synapse_entity_tracker.get( + synapse_id=annotation_dict["annotations"]["id"], + syn=self.syn, + download_file=False, + retrieve_if_not_present=False, + ) + if local_entity: + local_entity.etag = annotation_storage_result.etag + local_entity.annotations = annotation_storage_result + return annotation_storage_result def process_row_annotations( self, @@ -1705,9 +1928,31 @@ async def format_row_annotations( v = v[0:472] + "[truncatedByDataCuratorApp]" metadataSyn[keySyn] = v - # set 
annotation(s) for the various objects/items in a dataset on Synapse - annos = await self.get_async_annotation(entityId) + # This will first check if the entity is already in memory, and if so, that + # instance is used. Unfortunately, the expected return format needs to match + # the Synapse API, so we need to convert the annotations to the expected format. + entity = self.synapse_entity_tracker.get( + synapse_id=entityId, + syn=self.syn, + download_file=False, + retrieve_if_not_present=False, + ) + if entity is not None: + synapse_annotations = _convert_to_annotations_list( + annotations=entity.annotations + ) + annos = { + "annotations": { + "id": entity.id, + "etag": entity.etag, + "annotations": synapse_annotations, + } + } + else: + annos = await self.get_async_annotation(entityId) + + # set annotation(s) for the various objects/items in a dataset on Synapse csv_list_regex = comma_separated_list_regex() annos = self.process_row_annotations( @@ -1729,7 +1974,9 @@ def format_manifest_annotations(self, manifest, manifest_synapse_id): For now just getting the Component. """ - entity = self.syn.get(manifest_synapse_id, downloadFile=False) + entity = self.synapse_entity_tracker.get( + synapse_id=manifest_synapse_id, syn=self.syn, download_file=False + ) is_file = entity.concreteType.endswith(".FileEntity") is_table = entity.concreteType.endswith(".TableEntity") @@ -1758,7 +2005,9 @@ def format_manifest_annotations(self, manifest, manifest_synapse_id): metadata = self.getTableAnnotations(manifest_synapse_id) # Get annotations - annos = self.syn.get_annotations(manifest_synapse_id) + annos = OldAnnotations( + id=entity.id, etag=entity.etag, values=entity.annotations + ) # Add metadata to the annotations for annos_k, annos_v in metadata.items(): @@ -1949,6 +2198,7 @@ def _create_entity_id(self, idx, row, manifest, datasetId): rowEntity = Folder(str(uuid.uuid4()), parent=datasetId) rowEntity = self.syn.store(rowEntity) entityId = rowEntity["id"] + self.synapse_entity_tracker.add(synapse_id=entityId, entity=rowEntity) row["entityId"] = entityId manifest.loc[idx, "entityId"] = entityId return manifest, entityId @@ -1973,18 +2223,11 @@ async def _process_store_annos(self, requests: Set[asyncio.Task]) -> None: annos = completed_task.result() if isinstance(annos, Annotations): - annos_dict = asdict(annos) - normalized_annos = {k.lower(): v for k, v in annos_dict.items()} - entity_id = normalized_annos["id"] - logger.info(f"Successfully stored annotations for {entity_id}") + logger.info(f"Successfully stored annotations for {annos.id}") else: # store annotations if they are not None if annos: - normalized_annos = { - k.lower(): v - for k, v in annos["annotations"]["annotations"].items() - } - entity_id = normalized_annos["entityid"] + entity_id = annos["annotations"]["id"] logger.info( f"Obtained and processed annotations for {entity_id} entity" ) @@ -2007,7 +2250,10 @@ async def add_annotations_to_entities_files( manifest_synapse_table_id="", annotation_keys: str = "class_label", ): - """Depending on upload type add Ids to entityId row. Add anotations to connected files. + """ + Depending on upload type add Ids to entityId row. Add anotations to connected + files and folders. Despite the name of this function, it also applies to folders. + Args: dmge: DataModelGraphExplorer Object manifest (pd.DataFrame): loaded df containing user supplied data. 
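Editor's note: the cached branch above rebuilds the same payload shape that `get_async_annotation` returns, so `process_row_annotations` and `_process_store_annos` can consume either source. For orientation, the outer keys below come from the diff itself, while the typed inner values follow the Synapse annotations-v2 wire format; all IDs, eTags, and values are placeholders:

```python
# Placeholder illustration of the annotation payload shape handled above.
annos = {
    "annotations": {
        "id": "syn00000000",
        "etag": "00000000-0000-0000-0000-000000000000",
        "annotations": {
            "Component": {"type": "STRING", "value": ["Biospecimen"]},
        },
    }
}

entity_id = annos["annotations"]["id"]  # what _process_store_annos now logs
```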
@@ -2052,6 +2298,9 @@ async def add_annotations_to_entities_files( row["entityId"] = manifest_synapse_table_id manifest.loc[idx, "entityId"] = manifest_synapse_table_id entityId = "" + # If the row is the manifest table, do not add annotations + elif row["entityId"] == manifest_synapse_table_id: + entityId = "" else: # get the file id of the file to annotate, collected in above step. entityId = row["entityId"] @@ -2132,22 +2381,28 @@ def upload_manifest_as_table( ) # Load manifest to synapse as a CSV File manifest_synapse_file_id = self.upload_manifest_file( - manifest, - metadataManifestPath, - datasetId, - restrict, + manifest=manifest, + metadataManifestPath=metadataManifestPath, + datasetId=datasetId, + restrict_manifest=restrict, component_name=component_name, ) # Set annotations for the file manifest. manifest_annotations = self.format_manifest_annotations( - manifest, manifest_synapse_file_id + manifest=manifest, manifest_synapse_id=manifest_synapse_file_id ) - self.syn.set_annotations(manifest_annotations) + annos = self.syn.set_annotations(annotations=manifest_annotations) + manifest_entity = self.synapse_entity_tracker.get( + synapse_id=manifest_synapse_file_id, syn=self.syn, download_file=False + ) + manifest_entity.annotations = annos + manifest_entity.etag = annos.etag + logger.info("Associated manifest file with dataset on Synapse.") # Update manifest Synapse table with new entity id column. - manifest_synapse_table_id, manifest, table_manifest = self.uploadDB( + manifest_synapse_table_id, manifest, _ = self.uploadDB( dmge=dmge, manifest=manifest, datasetId=datasetId, @@ -2159,9 +2414,17 @@ def upload_manifest_as_table( # Set annotations for the table manifest manifest_annotations = self.format_manifest_annotations( - manifest, manifest_synapse_table_id + manifest=manifest, manifest_synapse_id=manifest_synapse_table_id ) - self.syn.set_annotations(manifest_annotations) + annotations_manifest_table = self.syn.set_annotations( + annotations=manifest_annotations + ) + manifest_table_entity = self.synapse_entity_tracker.get( + synapse_id=manifest_synapse_table_id, syn=self.syn, download_file=False + ) + manifest_table_entity.annotations = annotations_manifest_table + manifest_table_entity.etag = annotations_manifest_table.etag + return manifest_synapse_file_id @tracer.start_as_current_span("SynapseStorage::upload_manifest_as_csv") @@ -2219,7 +2482,12 @@ def upload_manifest_as_csv( manifest_annotations = self.format_manifest_annotations( manifest, manifest_synapse_file_id ) - self.syn.set_annotations(manifest_annotations) + annos = self.syn.set_annotations(manifest_annotations) + manifest_entity = self.synapse_entity_tracker.get( + synapse_id=manifest_synapse_file_id, syn=self.syn, download_file=False + ) + manifest_entity.annotations = annos + manifest_entity.etag = annos.etag logger.info("Associated manifest file with dataset on Synapse.") @@ -2296,7 +2564,12 @@ def upload_manifest_combo( manifest_annotations = self.format_manifest_annotations( manifest, manifest_synapse_file_id ) - self.syn.set_annotations(manifest_annotations) + file_manifest_annoations = self.syn.set_annotations(manifest_annotations) + manifest_entity = self.synapse_entity_tracker.get( + synapse_id=manifest_synapse_file_id, syn=self.syn, download_file=False + ) + manifest_entity.annotations = file_manifest_annoations + manifest_entity.etag = file_manifest_annoations.etag logger.info("Associated manifest file with dataset on Synapse.") # Update manifest Synapse table with new entity id column. 
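Editor's note: each `upload_manifest_*` method above now repeats the same three steps after `set_annotations` — re-resolve the entity through the tracker, then copy the returned annotations and eTag onto it so later cache hits are not stale. A standalone sketch of that pattern; the helper name is hypothetical and not part of this diff:

```python
from typing import Any

import synapseclient

from schematic.store.synapse_tracker import SynapseEntityTracker


def store_annotations_and_refresh_cache(
    syn: synapseclient.Synapse,
    tracker: SynapseEntityTracker,
    synapse_id: str,
    annotations: Any,
) -> None:
    """Hypothetical helper mirroring the repeated pattern in upload_manifest_as_table/csv/combo."""
    stored = syn.set_annotations(annotations)
    entity = tracker.get(synapse_id=synapse_id, syn=syn, download_file=False)
    # Keep the cached entity in sync with the eTag bump caused by set_annotations.
    entity.annotations = stored
    entity.etag = stored.etag
```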
@@ -2314,7 +2587,12 @@ def upload_manifest_combo( manifest_annotations = self.format_manifest_annotations( manifest, manifest_synapse_table_id ) - self.syn.set_annotations(manifest_annotations) + table_manifest_annotations = self.syn.set_annotations(manifest_annotations) + manifest_entity = self.synapse_entity_tracker.get( + synapse_id=manifest_synapse_table_id, syn=self.syn, download_file=False + ) + manifest_entity.annotations = table_manifest_annotations + manifest_entity.etag = table_manifest_annotations.etag return manifest_synapse_file_id @tracer.start_as_current_span("SynapseStorage::associateMetadataWithFiles") @@ -2444,7 +2722,9 @@ def getTableAnnotations(self, table_id: str): dict: Annotations as comma-separated strings. """ try: - entity = self.syn.get(table_id, downloadFile=False) + entity = self.synapse_entity_tracker.get( + synapse_id=table_id, syn=self.syn, download_file=False + ) is_table = entity.concreteType.endswith(".TableEntity") annotations_raw = entity.annotations except SynapseHTTPError: @@ -2476,7 +2756,9 @@ def getFileAnnotations(self, fileId: str) -> Dict[str, str]: # Get entity metadata, including annotations try: - entity = self.syn.get(fileId, downloadFile=False) + entity = self.synapse_entity_tracker.get( + synapse_id=fileId, syn=self.syn, download_file=False + ) is_file = entity.concreteType.endswith(".FileEntity") is_folder = entity.concreteType.endswith(".Folder") annotations_raw = entity.annotations @@ -2549,7 +2831,7 @@ def getDatasetAnnotations( try: logger.info("Trying batch mode for retrieving Synapse annotations") table = self.getDatasetAnnotationsBatch(datasetId, dataset_file_ids) - except (SynapseAuthenticationError, SynapseHTTPError): + except (SynapseAuthenticationError, SynapseHTTPError, ValueError): logger.info( f"Unable to create a temporary file view bound to {datasetId}. " "Defaulting to slower iterative retrieval of annotations." 
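Editor's note on the last hunk above: the batch path for dataset annotations now also falls back to the slower iterative retrieval when building the temporary file view raises `ValueError`, not only on Synapse auth/HTTP errors. Schematically, with both callables below as stand-ins rather than names from this diff:

```python
from typing import Any, Callable

from synapseclient.core.exceptions import SynapseAuthenticationError, SynapseHTTPError


def get_annotations_with_fallback(
    batch_fn: Callable[[], Any], iterative_fn: Callable[[], Any]
) -> Any:
    """Stand-in illustrating the broadened except clause above."""
    try:
        return batch_fn()
    except (SynapseAuthenticationError, SynapseHTTPError, ValueError):
        # ValueError is now caught so file-view construction problems also
        # trigger the slower, per-entity fallback.
        return iterative_fn()
```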
@@ -2629,7 +2911,7 @@ def getDatasetProject(self, datasetId: str) -> str: # re-query if no datasets found if dataset_row.empty: sleep(5) - self.query_fileview() + self.query_fileview(force_requery=True) # Subset main file view dataset_index = self.storageFileviewTable["id"] == datasetId dataset_row = self.storageFileviewTable[dataset_index] @@ -2641,7 +2923,9 @@ def getDatasetProject(self, datasetId: str) -> str: # Otherwise, check if already project itself try: - syn_object = self.syn.get(datasetId) + syn_object = self.synapse_entity_tracker.get( + synapse_id=datasetId, syn=self.syn, download_file=False + ) if syn_object.properties["concreteType"].endswith("Project"): return datasetId except SynapseHTTPError: @@ -2717,6 +3001,7 @@ def __init__( datasetId: str = None, existingTableId: str = None, restrict: bool = False, + synapse_entity_tracker: SynapseEntityTracker = None, ): """ Class governing table operations (creation, replacement, upserts, updates) in schematic @@ -2726,6 +3011,7 @@ def __init__( datasetId: synID of the dataset for the manifest existingTableId: synId of the table currently exising on synapse (if there is one) restrict: bool, whether or not the manifest contains sensitive data that will need additional access restrictions + synapse_entity_tracker: Tracker for a pull-through cache of Synapse entities """ self.synStore = synStore @@ -2734,6 +3020,7 @@ def __init__( self.datasetId = datasetId self.existingTableId = existingTableId self.restrict = restrict + self.synapse_entity_tracker = synapse_entity_tracker or SynapseEntityTracker() @tracer.start_as_current_span("TableOperations::createTable") def createTable( @@ -2751,8 +3038,9 @@ def createTable( Returns: table.schema.id: synID of the newly created table """ - - datasetEntity = self.synStore.syn.get(self.datasetId, downloadFile=False) + datasetEntity = self.synapse_entity_tracker.get( + synapse_id=self.datasetId, syn=self.synStore.syn, download_file=False + ) datasetName = datasetEntity.name table_schema_by_cname = self.synStore._get_table_schema_by_cname(columnTypeDict) @@ -2796,12 +3084,18 @@ def createTable( ) table = Table(schema, self.tableToLoad) table = self.synStore.syn.store(table, isRestricted=self.restrict) + # Commented out until https://sagebionetworks.jira.com/browse/PLFM-8605 is resolved + # self.synapse_entity_tracker.add(synapse_id=table.schema.id, entity=table.schema) + self.synapse_entity_tracker.remove(synapse_id=table.schema.id) return table.schema.id else: # For just uploading the tables to synapse using default # column types. 
table = build_table(self.tableName, datasetParentProject, self.tableToLoad) table = self.synStore.syn.store(table, isRestricted=self.restrict) + # Commented out until https://sagebionetworks.jira.com/browse/PLFM-8605 is resolved + # self.synapse_entity_tracker.add(synapse_id=table.schema.id, entity=table.schema) + self.synapse_entity_tracker.remove(synapse_id=table.schema.id) return table.schema.id @tracer.start_as_current_span("TableOperations::replaceTable") @@ -2820,7 +3114,10 @@ def replaceTable( Returns: existingTableId: synID of the already existing table that had its metadata replaced """ - datasetEntity = self.synStore.syn.get(self.datasetId, downloadFile=False) + datasetEntity = self.synapse_entity_tracker.get( + synapse_id=self.datasetId, syn=self.synStore.syn, download_file=False + ) + datasetName = datasetEntity.name table_schema_by_cname = self.synStore._get_table_schema_by_cname(columnTypeDict) existing_table, existing_results = self.synStore.get_synapse_table( @@ -2828,11 +3125,16 @@ def replaceTable( ) # remove rows self.synStore.syn.delete(existing_results) + # Data changes such as removing all rows causes the eTag to change. + self.synapse_entity_tracker.remove(synapse_id=self.existingTableId) # wait for row deletion to finish on synapse before getting empty table sleep(10) # removes all current columns - current_table = self.synStore.syn.get(self.existingTableId) + current_table = self.synapse_entity_tracker.get( + synapse_id=self.existingTableId, syn=self.synStore.syn, download_file=False + ) + current_columns = self.synStore.syn.getTableColumns(current_table) for col in current_columns: current_table.removeColumn(col) @@ -2880,7 +3182,12 @@ def replaceTable( # adds new columns to schema for col in cols: current_table.addColumn(col) - self.synStore.syn.store(current_table, isRestricted=self.restrict) + table_result = self.synStore.syn.store( + current_table, isRestricted=self.restrict + ) + # Commented out until https://sagebionetworks.jira.com/browse/PLFM-8605 is resolved + # self.synapse_entity_tracker.add(synapse_id=table_result.schema.id, entity=table_result.schema) + self.synapse_entity_tracker.remove(synapse_id=table_result.id) # wait for synapse store to finish sleep(1) @@ -2892,6 +3199,9 @@ def replaceTable( schema.id = self.existingTableId table = Table(schema, self.tableToLoad, etag=existing_results.etag) table = self.synStore.syn.store(table, isRestricted=self.restrict) + # Commented out until https://sagebionetworks.jira.com/browse/PLFM-8605 is resolved + # self.synapse_entity_tracker.add(synapse_id=table.schema.id, entity=table.schema) + self.synapse_entity_tracker.remove(synapse_id=table.schema.id) else: logging.error("Must specify a schema for table replacements") @@ -2929,7 +3239,7 @@ def _get_auth_token( # Try getting creds from .synapseConfig file if it exists # Primarily useful for local users. 
Seems to correlate with credentials stored in synaspe object when logged in if os.path.exists(CONFIG.synapse_configuration_path): - config = self.synStore.syn.getConfigFile(CONFIG.synapse_configuration_path) + config = get_config_file(CONFIG.synapse_configuration_path) # check which credentials are provided in file if config.has_option("authentication", "authtoken"): @@ -2964,6 +3274,8 @@ def upsertTable(self, dmge: DataModelGraphExplorer): synapseDB = SynapseDatabase( auth_token=authtoken, project_id=self.synStore.getDatasetProject(self.datasetId), + syn=self.synStore.syn, + synapse_entity_tracker=self.synapse_entity_tracker, ) try: @@ -3000,7 +3312,10 @@ def _update_table_uuid_column( """ # Get the columns of the schema - schema = self.synStore.syn.get(self.existingTableId) + schema = self.synapse_entity_tracker.get( + synapse_id=self.existingTableId, syn=self.synStore.syn, download_file=False + ) + cols = self.synStore.syn.getTableColumns(schema) # Iterate through columns until `Uuid` column is found @@ -3017,6 +3332,9 @@ def _update_table_uuid_column( new_col = Column(columnType="STRING", maximumSize=64, name="Id") schema.addColumn(new_col) schema = self.synStore.syn.store(schema) + # self.synapse_entity_tracker.add(synapse_id=schema.id, entity=schema) + # Commented out until https://sagebionetworks.jira.com/browse/PLFM-8605 is resolved + self.synapse_entity_tracker.remove(synapse_id=schema.id) # If there is not, then use the old `Uuid` column as a basis for the new `Id` column else: # Build ColumnModel that will be used for new column @@ -3070,10 +3388,15 @@ def updateTable( self.tableToLoad = update_df(existing_table, self.tableToLoad, update_col) # store table with existing etag data and impose restrictions as appropriate - self.synStore.syn.store( + table_result = self.synStore.syn.store( Table(self.existingTableId, self.tableToLoad, etag=existing_results.etag), isRestricted=self.restrict, ) + # We cannot store the Table to the `synapse_entity_tracker` because there is + # not `Schema` on the table object. The above `.store()` function call would + # also update the ETag of the entity within Synapse. Remove it from the tracker + # and re-retrieve it later on if needed again. + self.synapse_entity_tracker.remove(synapse_id=table_result.tableId) return self.existingTableId @@ -3191,6 +3514,12 @@ def _fix_default_columns(self): # Rename ROW_ETAG column to eTag and place at end of data frame if "ROW_ETAG" in self.table: row_etags = self.table.pop("ROW_ETAG") + + # eTag column may already present if users annotated data without submitting manifest + # we're only concerned with the new values and not the existing ones + if "eTag" in self.table: + del self.table["eTag"] + self.table.insert(len(self.table.columns), "eTag", row_etags) return self.table diff --git a/schematic/store/synapse_tracker.py b/schematic/store/synapse_tracker.py new file mode 100644 index 000000000..6d163c1e0 --- /dev/null +++ b/schematic/store/synapse_tracker.py @@ -0,0 +1,147 @@ +"""This script is responsible for creating a 'pull through cache' class that can be +added through composition to any class where Synapse entities might be used. The idea +behind this class is to provide a mechanism such that if a Synapse entity is requested +multiple times, the entity is only downloaded once. 
This is useful for preventing +multiple downloads of the same entity, which can be time consuming.""" +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Union + +import synapseclient +from synapseclient import Entity, File, Folder, Project, Schema + + +@dataclass +class SynapseEntityTracker: + """The SynapseEntityTracker class handles tracking synapse entities throughout the + lifecycle of a request to schematic. It is used to prevent multiple downloads of + the same entity.""" + + synapse_entities: Dict[str, Union[Entity, Project, File, Folder, Schema]] = field( + default_factory=dict + ) + project_headers: Dict[str, List[Dict[str, str]]] = field(default_factory=dict) + """A dictionary of project headers for each user requested.""" + + def get( + self, + synapse_id: str, + syn: synapseclient.Synapse, + download_file: bool = False, + retrieve_if_not_present: bool = True, + download_location: str = None, + if_collision: str = None, + ) -> Optional[Union[Entity, Project, File, Folder, Schema]]: + """Retrieves a Synapse entity from the cache if it exists, otherwise downloads + the entity from Synapse and adds it to the cache. + + Args: + synapse_id: The Synapse ID of the entity to retrieve. + syn: A Synapse object. + download_file: If True, download the file. + retrieve_if_not_present: If True, retrieve the entity if it is not present + in the cache. If not found in the cache, and this is False, return None. + download_location: The location to download the file to. + if_collision: The action to take if there is a collision when downloading + the file. May be "overwrite.local", "keep.local", or "keep.both". A + collision occurs when a file with the same name already exists at the + download location. + + Returns: + The Synapse entity if found. When retrieve_if_not_present is False and the + entity is not found in the local cache, returns None. If + retrieve_if_not_present is True and the entity is not found in the local + cache, retrieve the entity from Synapse and add it to the cache. + """ + entity = self.synapse_entities.get(synapse_id, None) + + if entity is None or (download_file and not entity.path): + if not retrieve_if_not_present: + return None + entity = syn.get( + synapse_id, + downloadFile=download_file, + downloadLocation=download_location, + ifcollision=if_collision, + ) + self.synapse_entities.update({synapse_id: entity}) + return entity + + def add( + self, synapse_id: str, entity: Union[Entity, Project, File, Folder, Schema] + ) -> None: + """Adds a Synapse entity to the cache. + + Args: + synapse_id: The Synapse ID of the entity to add. + entity: The Synapse entity to add. + """ + self.synapse_entities.update({synapse_id: entity}) + + def remove(self, synapse_id: str) -> None: + """Removes a Synapse entity from the cache. + + Args: + synapse_id: The Synapse ID of the entity to remove. + """ + self.synapse_entities.pop(synapse_id, None) + + def search_local_by_parent_and_name( + self, name: str, parent_id: str + ) -> Union[Entity, Project, File, Folder, Schema, None]: + """ + Searches the local cache for an entity with the given name and parent_id. The + is useful in situations where we might not have the ID of the resource, but we + do have the name and parent ID. + + Args: + name: The name of the entity to search for. + parent_id: The parent ID of the entity to search for. + + Returns: + The entity if it exists, otherwise None. 
+ """ + for entity in self.synapse_entities.values(): + if entity.name == name and entity.parentId == parent_id: + return entity + return None + + def get_project_headers( + self, syn: synapseclient.Synapse, current_user_id: str + ) -> List[Dict[str, str]]: + """Gets the paginated results of the REST call to Synapse to check what projects the current user has access to. + + Args: + syn: A Synapse object + current_user_id: profile id for the user whose projects we want to get. + + Returns: + A list of dictionaries matching + """ + project_headers = self.project_headers.get(current_user_id, None) + if project_headers: + return project_headers + + all_results = syn.restGET( + "/projects/user/{principalId}".format(principalId=current_user_id) + ) + + while ( + "nextPageToken" in all_results + ): # iterate over next page token in results while there is any + results_token = syn.restGET( + "/projects/user/{principalId}?nextPageToken={nextPageToken}".format( + principalId=current_user_id, + nextPageToken=all_results["nextPageToken"], + ) + ) + all_results["results"].extend(results_token["results"]) + + if "nextPageToken" in results_token: + all_results["nextPageToken"] = results_token["nextPageToken"] + else: + del all_results["nextPageToken"] + + results = all_results["results"] + self.project_headers.update({current_user_id: results}) + + return results diff --git a/schematic/utils/general.py b/schematic/utils/general.py index 974805043..0bb932aa3 100644 --- a/schematic/utils/general.py +++ b/schematic/utils/general.py @@ -10,13 +10,15 @@ from cProfile import Profile from datetime import datetime, timedelta from functools import wraps -from typing import Union, TypeVar, Any, Optional, Sequence, Callable +from typing import Any, Callable, Optional, Sequence, TypeVar, Union +from synapseclient import Synapse # type: ignore +from synapseclient.core import cache # type: ignore from synapseclient.core.exceptions import SynapseHTTPError # type: ignore from synapseclient.entity import File, Folder, Project # type: ignore from synapseclient.table import EntityViewSchema # type: ignore -from synapseclient.core import cache # type: ignore -from synapseclient import Synapse # type: ignore + +from schematic.store.synapse_tracker import SynapseEntityTracker logger = logging.getLogger(__name__) @@ -180,12 +182,17 @@ def clear_synapse_cache(synapse_cache: cache.Cache, minutes: int) -> int: return num_of_deleted_files -def entity_type_mapping(syn: Synapse, entity_id: str) -> str: +def entity_type_mapping( + syn: Synapse, + entity_id: str, + synapse_entity_tracker: Optional[SynapseEntityTracker] = None, +) -> str: """Return the entity type of manifest Args: syn (Synapse): Synapse object entity_id (str): id of an entity + synapse_entity_tracker: Tracker for a pull-through cache of Synapse entities Raises: SynapseHTTPError: Re-raised SynapseHTTPError @@ -195,7 +202,11 @@ def entity_type_mapping(syn: Synapse, entity_id: str) -> str: """ # check the type of entity try: - entity = syn.get(entity_id, downloadFile=False) + if not synapse_entity_tracker: + synapse_entity_tracker = SynapseEntityTracker() + entity = synapse_entity_tracker.get( + synapse_id=entity_id, syn=syn, download_file=False + ) except SynapseHTTPError as exc: logger.error( f"cannot get {entity_id} from asset store. 
Please make sure that {entity_id} exists" @@ -213,19 +224,24 @@ def entity_type_mapping(syn: Synapse, entity_id: str) -> str: elif isinstance(entity, Project): entity_type = "project" else: + assert entity is not None # if there's no matching type, return concreteType entity_type = entity.concreteType return entity_type -def create_temp_folder(path: str) -> str: +def create_temp_folder(path: str, prefix: Optional[str] = None) -> str: """This function creates a temporary directory in the specified directory Args: path(str): a directory path where all the temporary files will live + prefix(str): a prefix to be added to the temporary directory name Returns: returns the absolute pathname of the new directory. """ + if not os.path.exists(path): + os.makedirs(path, exist_ok=True) + # Create a temporary directory in the specified directory - path = tempfile.mkdtemp(dir=path) + path = tempfile.mkdtemp(dir=path, prefix=prefix) return path diff --git a/schematic/utils/io_utils.py b/schematic/utils/io_utils.py index 1651d085e..a0bb9d241 100644 --- a/schematic/utils/io_utils.py +++ b/schematic/utils/io_utils.py @@ -1,9 +1,10 @@ """io utils""" -from typing import Any import json +import os +import time import urllib.request -from schematic import LOADER +from typing import Any def load_json(file_path: str) -> Any: @@ -31,6 +32,9 @@ def export_json(json_doc: Any, file_path: str) -> None: def load_default() -> Any: """Load biolink vocabulary""" data_path = "data_models/biothings.model.jsonld" + # Lazy import to avoid circular imports + from schematic import LOADER # pylint: disable=import-outside-toplevel + biothings_path = LOADER.filename(data_path) return load_json(biothings_path) @@ -38,5 +42,40 @@ def load_default() -> Any: def load_schemaorg() -> Any: """Load SchemaOrg vocabulary""" data_path = "data_models/schema_org.model.jsonld" + # Lazy import to avoid circular imports + from schematic import LOADER # pylint: disable=import-outside-toplevel + schema_org_path = LOADER.filename(data_path) return load_json(schema_org_path) + + +def cleanup_temporary_storage( + temporary_storage_directory: str, time_delta_seconds: int +) -> None: + """Handles cleanup of temporary storage directory. The usage of the + `time_delta_seconds` parameter is to prevent deleting files that are currently + being used by other requests. In production we will be deleting those files + which have not been modified for more than 1 hour. + + Args: + temporary_storage_directory: Path to the temporary storage directory. + time_delta_seconds: The time delta in seconds used to determine which files + should be deleted. 
+ """ + if os.path.exists(temporary_storage_directory): + for root, all_dirs, files in os.walk( + temporary_storage_directory, topdown=False + ): + # Delete files older than the specified time delta + for file in files: + file_path = os.path.join(root, file) + if os.path.isfile(file_path) and os.path.getmtime(file_path) < ( + time.time() - time_delta_seconds + ): + os.remove(file_path) + + # Delete empty directories + for all_dir in all_dirs: + dir_path = os.path.join(root, all_dir) + if not os.listdir(dir_path): + os.rmdir(dir_path) diff --git a/schematic/utils/validate_utils.py b/schematic/utils/validate_utils.py index 5f50dfb02..faaf7e23a 100644 --- a/schematic/utils/validate_utils.py +++ b/schematic/utils/validate_utils.py @@ -2,16 +2,17 @@ # pylint: disable = anomalous-backslash-in-string +import logging import re from collections.abc import Mapping -import logging -from typing import Pattern, Union, Iterable, Any, Optional from numbers import Number -from jsonschema import validate +from typing import Any, Iterable, Optional, Pattern, Union + import numpy as np import pandas as pd +from jsonschema import validate + from schematic.utils.io_utils import load_json -from schematic import LOADER logger = logging.getLogger(__name__) @@ -19,6 +20,9 @@ def validate_schema(schema: Union[Mapping, bool]) -> None: """Validate schema against schema.org standard""" data_path = "validation_schemas/model.schema.json" + # Lazy import to avoid circular imports + from schematic import LOADER # pylint: disable=import-outside-toplevel + json_schema_path = LOADER.filename(data_path) json_schema = load_json(json_schema_path) return validate(schema, json_schema) @@ -27,6 +31,9 @@ def validate_schema(schema: Union[Mapping, bool]) -> None: def validate_property_schema(schema: Union[Mapping, bool]) -> None: """Validate schema against SchemaORG property definition standard""" data_path = "validation_schemas/property.schema.json" + # Lazy import to avoid circular imports + from schematic import LOADER # pylint: disable=import-outside-toplevel + json_schema_path = LOADER.filename(data_path) json_schema = load_json(json_schema_path) return validate(schema, json_schema) @@ -35,6 +42,9 @@ def validate_property_schema(schema: Union[Mapping, bool]) -> None: def validate_class_schema(schema: Union[Mapping, bool]) -> None: """Validate schema against SchemaORG class definition standard""" data_path = "validation_schemas/class.schema.json" + # Lazy import to avoid circular imports + from schematic import LOADER # pylint: disable=import-outside-toplevel + json_schema_path = LOADER.filename(data_path) json_schema = load_json(json_schema_path) return validate(schema, json_schema) diff --git a/schematic/visualization/attributes_explorer.py b/schematic/visualization/attributes_explorer.py index 9691932e7..668ea1374 100644 --- a/schematic/visualization/attributes_explorer.py +++ b/schematic/visualization/attributes_explorer.py @@ -3,14 +3,15 @@ import logging import os from typing import Optional, no_type_check + import numpy as np import pandas as pd -from schematic.schemas.data_model_parser import DataModelParser from schematic.schemas.data_model_graph import DataModelGraph, DataModelGraphExplorer from schematic.schemas.data_model_json_schema import DataModelJSONSchema -from schematic.utils.schema_utils import DisplayLabelType +from schematic.schemas.data_model_parser import DataModelParser from schematic.utils.io_utils import load_json +from schematic.utils.schema_utils import DisplayLabelType logger = 
logging.getLogger(__name__) @@ -40,6 +41,7 @@ def __init__( # Instantiate DataModelGraph if not data_model_grapher: + assert parsed_data_model is not None data_model_grapher = DataModelGraph(parsed_data_model, data_model_labels) # Generate graph diff --git a/schematic_api/api/__init__.py b/schematic_api/api/__init__.py index a65398ee5..299353935 100644 --- a/schematic_api/api/__init__.py +++ b/schematic_api/api/__init__.py @@ -1,30 +1,14 @@ import os +import traceback +from typing import Tuple import connexion -from typing import Tuple +from synapseclient.core.exceptions import SynapseAuthenticationError -import traceback -from synapseclient.core.exceptions import ( - SynapseAuthenticationError, -) from schematic.exceptions import AccessCredentialsError -from schematic import CONFIG -from jaeger_client import Config -from flask_opentracing import FlaskTracer -config = Config( - config={ - "enabled": True, - "sampler": {"type": "const", "param": 1}, - "logging": True, - }, - service_name="schema-api", -) -jaeger_tracer = config.initialize_tracer - - -def create_app(): +def create_app() -> None: connexionapp = connexion.FlaskApp(__name__, specification_dir="openapi/") connexionapp.add_api( "api.yaml", arguments={"title": "Schematic REST API"}, pythonic_params=True @@ -71,11 +55,6 @@ def handle_synapse_access_error(e: Exception) -> Tuple[str, int]: app = create_app() -flask_tracer = FlaskTracer( - jaeger_tracer, True, app, ["url", "url_rule", "environ.HTTP_X_REAL_IP", "path"] -) - - # def route_code(): # import flask_schematic as sc # sc.method1() diff --git a/schematic_api/api/routes.py b/schematic_api/api/routes.py index e977e480d..97484c15c 100644 --- a/schematic_api/api/routes.py +++ b/schematic_api/api/routes.py @@ -4,9 +4,7 @@ import pickle import shutil import tempfile -import time import urllib.request -from functools import wraps from typing import List, Tuple import connexion @@ -15,16 +13,6 @@ from flask import request, send_from_directory from flask_cors import cross_origin from opentelemetry import trace -from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter -from opentelemetry.sdk.resources import SERVICE_NAME, Resource -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor, - ConsoleSpanExporter, - SimpleSpanProcessor, - Span, -) -from opentelemetry.sdk.trace.sampling import ALWAYS_OFF from schematic.configuration.configuration import CONFIG from schematic.manifest.generator import ManifestGenerator @@ -32,7 +20,7 @@ from schematic.schemas.data_model_graph import DataModelGraph, DataModelGraphExplorer from schematic.schemas.data_model_parser import DataModelParser from schematic.store.synapse import ManifestDownload, SynapseStorage -from schematic.utils.general import entity_type_mapping +from schematic.utils.general import create_temp_folder, entity_type_mapping from schematic.utils.schema_utils import ( DisplayLabelType, get_property_label_from_display_name, @@ -43,77 +31,10 @@ logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG) -tracing_service_name = os.environ.get("TRACING_SERVICE_NAME", "schematic-api") - -trace.set_tracer_provider( - TracerProvider(resource=Resource(attributes={SERVICE_NAME: tracing_service_name})) -) - - -# borrowed from: https://github.com/Sage-Bionetworks/synapsePythonClient/blob/develop/tests/integration/conftest.py -class FileSpanExporter(ConsoleSpanExporter): - """Create an exporter for OTEL data to a file.""" - - def 
__init__(self, file_path: str) -> None: - """Init with a path.""" - self.file_path = file_path - - def export(self, spans: List[Span]) -> None: - """Export the spans to the file.""" - with open(self.file_path, "a", encoding="utf-8") as f: - for span in spans: - span_json_one_line = span.to_json().replace("\n", "") + "\n" - f.write(span_json_one_line) - - -def set_up_tracing() -> None: - """Set up tracing for the API.""" - tracing_export = os.environ.get("TRACING_EXPORT_FORMAT", None) - if tracing_export == "otlp": - trace.get_tracer_provider().add_span_processor( - BatchSpanProcessor(OTLPSpanExporter()) - ) - elif tracing_export == "file": - timestamp_millis = int(time.time() * 1000) - file_name = f"otel_spans_integration_testing_{timestamp_millis}.ndjson" - file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name) - processor = SimpleSpanProcessor(FileSpanExporter(file_path)) - trace.get_tracer_provider().add_span_processor(processor) - else: - trace.set_tracer_provider(TracerProvider(sampler=ALWAYS_OFF)) - -set_up_tracing() tracer = trace.get_tracer("Schematic") -def trace_function_params(): - """capture all the parameters of API requests""" - - def decorator(func): - """create a decorator""" - - @wraps(func) - def wrapper(*args, **kwargs): - """create a wrapper function. Any number of positional arguments and keyword arguments can be passed here.""" - tracer = trace.get_tracer(__name__) - # Start a new span with the function's name - with tracer.start_as_current_span(func.__name__) as span: - # Set values of parameters as tags - for i, arg in enumerate(args): - span.set_attribute(f"arg{i}", arg) - - for name, value in kwargs.items(): - span.set_attribute(name, value) - # Call the actual function - result = func(*args, **kwargs) - return result - - return wrapper - - return decorator - - def config_handler(asset_view: str = None): # check if path to config is provided path_to_config = app.config["SCHEMATIC_CONFIG"] @@ -182,7 +103,7 @@ def convert_df_to_csv(self, df, file_name): """ # convert dataframe to a temporary csv file - temp_dir = tempfile.gettempdir() + temp_dir = create_temp_folder(path=tempfile.gettempdir()) temp_path = os.path.join(temp_dir, file_name) df.to_csv(temp_path, encoding="utf-8", index=False) return temp_path @@ -271,7 +192,7 @@ def save_file(file_key="csv_file"): manifest_file = connexion.request.files[file_key] # save contents of incoming manifest CSV file to temp file - temp_dir = tempfile.gettempdir() + temp_dir = create_temp_folder(path=tempfile.gettempdir()) # path to temp file where manifest file contents will be saved temp_path = os.path.join(temp_dir, manifest_file.filename) # save content @@ -296,7 +217,9 @@ def get_temp_jsonld(schema_url): # retrieve a JSON-LD via URL and store it in a temporary location with urllib.request.urlopen(schema_url) as response: with tempfile.NamedTemporaryFile( - delete=False, suffix=".model.jsonld" + delete=False, + suffix=".model.jsonld", + dir=create_temp_folder(path=tempfile.gettempdir()), ) as tmp_file: shutil.copyfileobj(response, tmp_file) @@ -307,13 +230,18 @@ def get_temp_jsonld(schema_url): def get_temp_csv(schema_url): # retrieve a CSV via URL and store it in a temporary location with urllib.request.urlopen(schema_url) as response: - with tempfile.NamedTemporaryFile(delete=False, suffix=".model.csv") as tmp_file: + with tempfile.NamedTemporaryFile( + delete=False, + suffix=".model.csv", + dir=create_temp_folder(path=tempfile.gettempdir()), + ) as tmp_file: shutil.copyfileobj(response, tmp_file) 
# get path to temporary csv file return tmp_file.name +@tracer.start_as_current_span("routes::get_temp_model_path") def get_temp_model_path(schema_url): # Get model type: model_extension = pathlib.Path(schema_url).suffix.replace(".", "").upper() @@ -329,7 +257,6 @@ def get_temp_model_path(schema_url): # @before_request -@trace_function_params() def get_manifest_route( schema_url: str, use_annotations: bool, @@ -392,7 +319,6 @@ def get_manifest_route( return all_results -@trace_function_params() def validate_manifest_route( schema_url, data_type, @@ -451,7 +377,6 @@ def validate_manifest_route( # profile validate manifest route function -@trace_function_params() def submit_manifest_route( schema_url, data_model_labels: str, @@ -496,9 +421,6 @@ def submit_manifest_route( else: validate_component = data_type - # get path to temp data model file (csv or jsonld) as appropriate - data_model = get_temp_model_path(schema_url) - if not table_column_names: table_column_names = "class_label" @@ -638,7 +560,10 @@ def check_entity_type(entity_id): config_handler() syn = SynapseStorage.login(access_token=access_token) - entity_type = entity_type_mapping(syn, entity_id) + current_span = trace.get_current_span() + if current_span.is_recording(): + current_span.set_attribute("user.id", syn.credentials.owner_id) + entity_type = entity_type_mapping(syn=syn, entity_id=entity_id) return entity_type @@ -736,9 +661,12 @@ def download_manifest(manifest_id, new_manifest_name="", as_json=True): # use login method in synapse storage syn = SynapseStorage.login(access_token=access_token) + current_span = trace.get_current_span() + if current_span.is_recording(): + current_span.set_attribute("user.id", syn.credentials.owner_id) try: md = ManifestDownload(syn, manifest_id) - manifest_data = ManifestDownload.download_manifest(md, new_manifest_name) + manifest_data = md.download_manifest(newManifestName=new_manifest_name) # return local file path manifest_local_file_path = manifest_data["path"] except TypeError as e: diff --git a/tests/conftest.py b/tests/conftest.py index e6382ed40..2f9dd3047 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,21 +4,16 @@ import os import shutil import sys +import tempfile +from dataclasses import dataclass from typing import Callable, Generator, Set +import flask import pytest from dotenv import load_dotenv +from flask.testing import FlaskClient from opentelemetry import trace -from opentelemetry._logs import set_logger_provider -from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter -from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter -from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler -from opentelemetry.sdk._logs.export import BatchLogRecordProcessor -from opentelemetry.sdk.resources import SERVICE_NAME, Resource -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor -from opentelemetry.sdk.trace.sampling import ALWAYS_OFF -from pytest_asyncio import is_async_test +from synapseclient.client import Synapse from schematic.configuration.configuration import CONFIG, Configuration from schematic.models.metadata import MetadataModel @@ -26,6 +21,8 @@ from schematic.schemas.data_model_parser import DataModelParser from schematic.store.synapse import SynapseStorage from schematic.utils.df_utils import load_df +from schematic.utils.general import create_temp_folder +from schematic_api.api import create_app from tests.utils import CleanupAction, 
CleanupItem tracer = trace.get_tracer("Schematic-Tests") @@ -51,9 +48,26 @@ def dataset_id(): yield "syn25614635" +@pytest.fixture(scope="class") +def flask_app() -> flask.Flask: + """Create a Flask app for testing.""" + app = create_app() + return app + + +@pytest.fixture(scope="class") +def flask_client(flask_app: flask.Flask) -> Generator[FlaskClient, None, None]: + flask_app.config["SCHEMATIC_CONFIG"] = None + + with flask_app.test_client() as client: + yield client + + # This class serves as a container for helper functions that can be # passed to individual tests using the `helpers` fixture. This approach # was required because fixture functions cannot take arguments. + + class Helpers: @staticmethod def get_data_path(path, *paths): @@ -121,21 +135,75 @@ def get_python_project(self): return python_projects[version] -@pytest.fixture(scope="session") +@pytest.fixture(scope="function") def helpers(): yield Helpers -@pytest.fixture(scope="session") +@pytest.fixture(scope="function") def config(): - yield CONFIG + yield Configuration() -@pytest.fixture(scope="session") +@pytest.fixture(scope="function") def synapse_store(): yield SynapseStorage() +@dataclass +class ConfigurationForTesting: + """ + Variables that are specific to testing. Specifically these are used to control + the flags used during manual verification of some integration test results. + + Attributes: + manual_test_verification_enabled (bool): Whether manual verification is enabled. + manual_test_verification_path (str): The path to the directory where manual test + verification files are stored. + use_deployed_schematic_api_server (bool): Used to determine if a local flask + instance is created during integration testing. If this is true schematic + tests will use a schematic API server running outside of the context of the + integration test. + schematic_api_server_url (str): The URL of the schematic API server. Defaults to + http://localhost:3001. + + """ + + manual_test_verification_enabled: bool + manual_test_verification_path: str + use_deployed_schematic_api_server: bool + schematic_api_server_url: str + + +@pytest.fixture(scope="function") +def testing_config(config: Configuration) -> ConfigurationForTesting: + """Configuration variables that are specific to testing.""" + manual_test_verification_enabled = ( + os.environ.get("MANUAL_TEST_VERIFICATION", "false").lower() == "true" + ) + use_deployed_schematic_api_server = ( + os.environ.get("USE_DEPLOYED_SCHEMATIC_API_SERVER", "false").lower() == "true" + ) + schematic_api_server_url = os.environ.get( + "SCHEMATIC_API_SERVER_URL", "http://localhost:3001" + ) + + if manual_test_verification_enabled: + manual_test_verification_path = os.path.join( + config.manifest_folder, "manual_test_verification" + ) + os.makedirs(manual_test_verification_path, exist_ok=True) + else: + manual_test_verification_path = "" + + return ConfigurationForTesting( + manual_test_verification_enabled=manual_test_verification_enabled, + manual_test_verification_path=manual_test_verification_path, + use_deployed_schematic_api_server=use_deployed_schematic_api_server, + schematic_api_server_url=schematic_api_server_url, + ) + + # These fixtures make copies of existing test manifests. 
# These copies can the be altered by a given test, and the copy will eb destroyed at the # end of the test @@ -167,7 +235,7 @@ def DMGE(helpers: Helpers) -> DataModelGraphExplorer: return dmge -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def syn_token(config: Configuration): synapse_config_path = config.synapse_configuration_path config_parser = configparser.ConfigParser() @@ -180,6 +248,23 @@ def syn_token(config: Configuration): return token +@pytest.fixture(scope="function") +def syn(syn_token) -> Synapse: + syn = Synapse() + syn.login(authToken=syn_token, silent=True) + return syn + + +@pytest.fixture(scope="session") +def download_location() -> Generator[str, None, None]: + download_location = create_temp_folder(path=tempfile.gettempdir()) + yield download_location + + # Cleanup after tests have used the temp folder + if os.path.exists(download_location): + shutil.rmtree(download_location) + + def metadata_model(helpers, data_model_labels): metadata_model = MetadataModel( inputMModelLocation=helpers.get_data_path("example.model.jsonld"), @@ -227,57 +312,8 @@ def cleanup_scheduled_items() -> None: return _append_cleanup -active_span_processors = [] - - -@pytest.fixture(scope="session", autouse=True) -def set_up_tracing() -> None: - """Set up tracing for the API.""" - tracing_export = os.environ.get("TRACING_EXPORT_FORMAT", None) - tracing_service_name = os.environ.get("TRACING_SERVICE_NAME", "schematic-tests") - if tracing_export == "otlp": - trace.set_tracer_provider( - TracerProvider( - resource=Resource(attributes={SERVICE_NAME: tracing_service_name}) - ) - ) - processor = BatchSpanProcessor(OTLPSpanExporter()) - active_span_processors.append(processor) - trace.get_tracer_provider().add_span_processor(processor) - else: - trace.set_tracer_provider(TracerProvider(sampler=ALWAYS_OFF)) - - @pytest.fixture(autouse=True, scope="function") def wrap_with_otel(request): """Start a new OTEL Span for each test function.""" with tracer.start_as_current_span(request.node.name): - try: - yield - finally: - for processor in active_span_processors: - processor.force_flush() - - -@pytest.fixture(scope="session", autouse=True) -def set_up_logging() -> None: - """Set up logging to export to OTLP.""" - logging_export = os.environ.get("LOGGING_EXPORT_FORMAT", None) - logging_service_name = os.environ.get("LOGGING_SERVICE_NAME", "schematic-tests") - logging_instance_name = os.environ.get("LOGGING_INSTANCE_NAME", "local") - if logging_export == "otlp": - resource = Resource.create( - { - "service.name": logging_service_name, - "service.instance.id": logging_instance_name, - } - ) - - logger_provider = LoggerProvider(resource=resource) - set_logger_provider(logger_provider=logger_provider) - - # TODO: Add support for secure connections - exporter = OTLPLogExporter(insecure=True) - logger_provider.add_log_record_processor(BatchLogRecordProcessor(exporter)) - handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider) - logging.getLogger().addHandler(handler) + yield diff --git a/tests/data/example.model.csv b/tests/data/example.model.csv index a85cf8cbf..7438c7145 100644 --- a/tests/data/example.model.csv +++ b/tests/data/example.model.csv @@ -32,10 +32,10 @@ Check Regex List Like,,,,,TRUE,DataProperty,,,list like::regex match [a-f] Check Regex Single,,,,,TRUE,DataProperty,,,regex search [a-f] Check Regex Format,,,,,TRUE,DataProperty,,,regex match [a-f] Check Regex Integer,,,,,TRUE,DataProperty,,,regex search ^\d+$ -Check Num,,,,,TRUE,DataProperty,,,num -Check 
Float,,,,,TRUE,DataProperty,,,float -Check Int,,,,,TRUE,DataProperty,,,int -Check String,,,,,TRUE,DataProperty,,,str +Check Num,,,,,TRUE,DataProperty,,,num error +Check Float,,,,,TRUE,DataProperty,,,float error +Check Int,,,,,TRUE,DataProperty,,,int error +Check String,,,,,TRUE,DataProperty,,,str error Check URL,,,,,TRUE,DataProperty,,,url Check Match at Least,,,,,TRUE,DataProperty,,,matchAtLeastOne Patient.PatientID set Check Match Exactly,,,,,TRUE,DataProperty,,,matchExactlyOne MockComponent.checkMatchExactly set diff --git a/tests/data/example.model.jsonld b/tests/data/example.model.jsonld index 3f13b188e..c4279a605 100644 --- a/tests/data/example.model.jsonld +++ b/tests/data/example.model.jsonld @@ -1258,7 +1258,7 @@ "sms:displayName": "Check Num", "sms:required": "sms:true", "sms:validationRules": [ - "num" + "num error" ] }, { @@ -1277,7 +1277,7 @@ "sms:displayName": "Check Float", "sms:required": "sms:true", "sms:validationRules": [ - "float" + "float error" ] }, { @@ -1296,7 +1296,7 @@ "sms:displayName": "Check Int", "sms:required": "sms:true", "sms:validationRules": [ - "int" + "int error" ] }, { @@ -1315,7 +1315,7 @@ "sms:displayName": "Check String", "sms:required": "sms:true", "sms:validationRules": [ - "str" + "str error" ] }, { diff --git a/tests/data/mock_manifests/CLI_tests/CLI_biospecimen.csv b/tests/data/mock_manifests/CLI_tests/CLI_biospecimen.csv new file mode 100644 index 000000000..036bc4ef1 --- /dev/null +++ b/tests/data/mock_manifests/CLI_tests/CLI_biospecimen.csv @@ -0,0 +1,3 @@ +Sample ID,Patient ID,Tissue Status,Component,Id,entityId +123,123,Malignant,Biospecimen,3e413bde-0571-458c-ad93-d56c2b25fadd,syn61260197 +456,456,Healthy,Biospecimen,fc314afe-5714-4b58-8c95-3dfd78dc827d,syn61260197 diff --git a/tests/data/mock_manifests/CLI_tests/CLI_patient_invalid.csv b/tests/data/mock_manifests/CLI_tests/CLI_patient_invalid.csv new file mode 100644 index 000000000..fa5a9abf1 --- /dev/null +++ b/tests/data/mock_manifests/CLI_tests/CLI_patient_invalid.csv @@ -0,0 +1,11 @@ +Patient ID,Sex,Year of Birth,Diagnosis,Component,Cancer Type,Family History +1,Female,,Healthy,Patient,Random,Random +2,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +3,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +4,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +5,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +6,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +7,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +8,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +9,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +10,Female,,Healthy,Patient,Breast,"Colorectal,Breast" diff --git a/tests/data/mock_manifests/MockComponent-cross-manifest-1.csv b/tests/data/mock_manifests/MockComponent-cross-manifest-1.csv new file mode 100644 index 000000000..ea543e9b9 --- /dev/null +++ b/tests/data/mock_manifests/MockComponent-cross-manifest-1.csv @@ -0,0 +1,15 @@ +Component,Check List,Check List Enum,Check List Like,Check List Like Enum,Check List Strict,Check List Enum Strict,Check Regex List,Check Regex List Like,Check Regex List Strict,Check Regex Single,Check Regex Format,Check Regex Integer,Check Num,Check Float,Check Int,Check String,Check URL,Check Match at Least,Check Match at Least values,Check Match Exactly,Check Match Exactly values,Check Match None,Check Match None values,Check Recommended,Check Ages,Check Unique,Check Range,Check Date,Check NA 
+MockComponent,"a,m,f","ab,cd",ab,ab,"ab,cd","ab,cd,ef","a,b,c",a,"a,b,c",test,"a,b,c,d",0,1.2,0.1,0,test,https://www.google.com/,test,1000,2000,9000,200,200,,6571,200,50,01/22/2005,1 +MockComponent,"a,m,f","ab,cd",cd,ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",2,1.341592608,2.1,-2,test,https://www.google.com/,test,1200,300,9001,1,1,,6571,1,51,01/23/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",100,-2.3,3.4,-100,test,https://www.google.com/,test,1300,300,300,3,3,,6571,3,52,01/24/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",apple,"a,b,c,d",100,100.3,5.166666667,100,apple,https://www.google.com/,apple,1400,300,9000,4,4,,6571,4,53,01/25/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",orange,"a,b,c,d",4,4.5,6.816666667,4,orange,https://www.google.com/,orange,1500,300,200,5,5,,6571,5,54,01/26/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",5,6.8,8.466666667,5,test,https://www.google.com/,test,1600,300,300,10,10,,6571,10,55,01/27/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",10,10.2,10.11666667,10,test,https://www.google.com/,test,1700,300,300,100,100,,6571,100,56,01/28/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",9,9,11.76666667,9,test,https://www.google.com/,test,1800,300,300,102,102,,6571,102,57,01/29/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",apple,"a,b,c,d",6,6,13.41666667,6,apple,https://www.google.com/,apple,1900,300,300,104,104,,6571,104,58,01/30/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","d,e","a,c","d,e",orange,"a,b,c,d",8,8,15.06666667,8,orange,https://www.google.com/,orange,2000,300,300,109,109,,6571,109,59,01/31/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",9,9,16.71666667,9,test,https://www.google.com/,test,2100,300,300,110,110,,6571,110,60,02/01/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",0,0,18.36666667,0,test,https://www.google.com/,test,2200,300,300,111,111,,6571,111,61,02/02/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",1,1,20.01666667,1,test,https://www.google.com/,test,2300,300,300,120,120,,6571,120,62,02/03/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","d,e","a,c","d,e",apple,"a,b,c,d",2,2,21.66666667,2,apple,https://www.google.com/,apple,2400,300,300,130,130,,6571,130,63,02/04/2005,1 \ No newline at end of file diff --git a/tests/data/mock_manifests/MockComponent-cross-manifest-2.csv b/tests/data/mock_manifests/MockComponent-cross-manifest-2.csv new file mode 100644 index 000000000..2b391fd39 --- /dev/null +++ b/tests/data/mock_manifests/MockComponent-cross-manifest-2.csv @@ -0,0 +1,15 @@ +Component,Check List,Check List Enum,Check List Like,Check List Like Enum,Check List Strict,Check List Enum Strict,Check Regex List,Check Regex List Like,Check Regex List Strict,Check Regex Single,Check Regex Format,Check Regex Integer,Check Num,Check Float,Check Int,Check String,Check URL,Check Match at Least,Check Match at Least values,Check Match Exactly,Check Match Exactly values,Check Match None,Check Match None values,Check Recommended,Check Ages,Check Unique,Check Range,Check Date,Check NA 
+MockComponent,"a,m,f","ab,cd",ab,ab,"ab,cd","ab,cd,ef","a,b,c",a,"a,b,c",test,"a,b,c,d",0,1.2,0.1,0,test,https://www.google.com/,test,1000,2000,9001,9,11,,6571,200,50,01/22/2005,1 +MockComponent,"a,m,f","ab,cd",cd,ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",2,1.341592608,2.1,-2,test,https://www.google.com/,test,1200,300,9001,7,11,,6571,1,51,01/23/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",100,-2.3,3.4,-100,test,https://www.google.com/,test,1200,300,9001,6,11,,6571,3,52,01/24/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",apple,"a,b,c,d",100,100.3,5.166666667,100,apple,https://www.google.com/,apple,1200,300,9001,9,11,,6571,4,53,01/25/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",orange,"a,b,c,d",4,4.5,6.816666667,4,orange,https://www.google.com/,orange,1200,300,9001,7,11,,6571,5,54,01/26/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",5,6.8,8.466666667,5,test,https://www.google.com/,test,1200,300,9001,6,11,,6571,10,55,01/27/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",10,10.2,10.11666667,10,test,https://www.google.com/,test,1200,300,9001,9,12,,6571,100,56,01/28/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",9,9,11.76666667,9,test,https://www.google.com/,test,1200,300,9001,7,12,,6571,102,57,01/29/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",apple,"a,b,c,d",6,6,13.41666667,6,apple,https://www.google.com/,apple,1200,300,9001,6,12,,6571,104,58,01/30/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","d,e","a,c","d,e",orange,"a,b,c,d",8,8,15.06666667,8,orange,https://www.google.com/,orange,1200,300,9001,9,12,,6571,109,59,01/31/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",9,9,16.71666667,9,test,https://www.google.com/,test,1200,300,9001,7,12,,6571,110,60,02/01/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",0,0,18.36666667,0,test,https://www.google.com/,test,1200,300,9001,6,12,,6571,111,61,02/02/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",1,1,20.01666667,1,test,https://www.google.com/,test,1200,300,9001,9,12,,6571,120,62,02/03/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","d,e","a,c","d,e",apple,"a,b,c,d",2,2,21.66666667,2,apple,https://www.google.com/,apple,1400,300,9001,7,12,,6571,130,63,02/04/2005,1 \ No newline at end of file diff --git a/tests/data/mock_manifests/Mock_Component_rule_combination.csv b/tests/data/mock_manifests/Mock_Component_rule_combination.csv new file mode 100644 index 000000000..08f1babf7 --- /dev/null +++ b/tests/data/mock_manifests/Mock_Component_rule_combination.csv @@ -0,0 +1,15 @@ +Component,Check List,Check List Enum,Check List Like,Check List Like Enum,Check List Strict,Check List Enum Strict,Check Regex List,Check Regex List Like,Check Regex List Strict,Check Regex Single,Check Regex Format,Check Regex Integer,Check Num,Check Float,Check Int,Check String,Check URL,Check Match at Least,Check Match at Least values,Check Match Exactly,Check Match Exactly values,Check Match None,Check Match None values,Check Recommended,Check Ages,Check Unique,Check Range,Check Date,Check NA 
+MockComponent,"a,m,f","ab,cd",ab,ab,"ab,cd","ab,cd,ef","a,b,c,d,e,f,g,h",a,a,test,"a,b,c,d",0,1.2,0.1,0,test,https://www.google.com/,test,1000,2000,9000,200,200,,6571,200,50,01/22/2005, +MockComponent,"a,m,f","ab,cd",cd,ef,"ab,cd","ab,cd,ef","a,b,c,d","a,c","a,a,b",test,"a,b,c,d",2,1.341592608,2.1,-2,test,https://www.google.com/,test,1200,300,200,1,1,,6571,1,51,01/23/2005,0 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c,h","a,b,h",test,"a,b,c,d",100,-2.3,3.4,-100,test,https://www.google.com/,test,1300,300,300,3,3,,6571,3,52,01/24/2005,3 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",apple,"a,b,c,d",100,100.3,5.166666667,100,apple,https://www.google.com/,apple,1400,300,9000,4,4,,6571,4,53,01/25/2005,-100 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",orange,"a,b,c,d",4,4.5,6.816666667,4,orange,https://www.google.com/,orange,1500,300,200,5,5,,6571,5,54,01/26/2005,20 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",5,6.8,8.466666667,5,test,https://www.google.com/,test,1600,300,300,10,10,,6571,10,55,01/27/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",10,10.2,10.11666667,10,test,https://www.google.com/,test,1700,300,300,100,100,,6571,100,56,01/28/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",test,"a,b,c,d",9,9,11.76666667,9,test,https://www.google.com/,test,1800,300,300,102,102,,6571,102,57,01/29/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","a,b,c","a,c","a,b,c",apple,"a,b,c,d",6,6,13.41666667,6,apple,https://www.google.com/,apple,1900,300,300,104,104,,6571,104,58,01/30/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","d,e,a","a,c","d,e,f",orange,"a,b,c,d",8,8,15.06666667,8,orange,https://www.google.com/,orange,2000,300,300,109,109,,6571,109,59,01/31/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",9,9,16.71666667,9,test,https://www.google.com/,test,2100,300,300,110,110,,6571,110,60,02/01/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",cd,"ab,cd","ab,cd,ef","d,e","a,c","d,e,f",test,"a,b,c,d",0,0,18.36666667,0,test,https://www.google.com/,test,2200,300,300,111,111,,6571,111,61,02/02/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ab,"ab,cd","ab,cd,ef","d,e","a,c","d,e",test,"a,b,c,d",1,1,20.01666667,1,test,https://www.google.com/,test,2300,300,300,120,120,,6571,120,62,02/03/2005,1 +MockComponent,"a,m,f","ab,cd","ab,ef",ef,"ab,cd","ab,cd,ef","d,e","a,c","d,e,f",apple,"a,b,c,d",2,2,21.66666667,2,apple,https://www.google.com/,apple,2400,300,300,130,130,,6571,130,63,02/04/2005,1 \ No newline at end of file diff --git a/tests/data/mock_manifests/TestManifestSubmission_test_submit_manifest_with_blacklisted_characters.csv b/tests/data/mock_manifests/TestManifestSubmission_test_submit_manifest_with_blacklisted_characters.csv new file mode 100644 index 000000000..351a267ac --- /dev/null +++ b/tests/data/mock_manifests/TestManifestSubmission_test_submit_manifest_with_blacklisted_characters.csv @@ -0,0 +1,2 @@ +Filename,Sample-ID,File-Format,Component,Genome Build,Genome FASTA,entityId +Manifest Submission - Manual test - file-based manifest submission/test-annotation-key-table-column-name/sample A.txt,100,FASTQ,BulkRNA-seqAssay,GRCh38,,syn63607043 \ No newline at end of file diff --git 
a/tests/data/mock_manifests/TestManifestSubmission_test_submit_manifest_with_hide_blanks.csv b/tests/data/mock_manifests/TestManifestSubmission_test_submit_manifest_with_hide_blanks.csv new file mode 100644 index 000000000..18bad92c0 --- /dev/null +++ b/tests/data/mock_manifests/TestManifestSubmission_test_submit_manifest_with_hide_blanks.csv @@ -0,0 +1,2 @@ +Filename,Sample ID,File Format,Component,Genome Build,Genome FASTA,entityId +Manifest Submission - Manual test - file-based manifest submission/hide-blanks-true/sample A.txt,1,FASTQ,BulkRNA-seqAssay,,,syn63606862 \ No newline at end of file diff --git a/tests/data/mock_manifests/TestManifestValidation_test_manifest_validation_basic_valid.csv b/tests/data/mock_manifests/TestManifestValidation_test_manifest_validation_basic_valid.csv new file mode 100644 index 000000000..dee60aaf9 --- /dev/null +++ b/tests/data/mock_manifests/TestManifestValidation_test_manifest_validation_basic_valid.csv @@ -0,0 +1,13 @@ +Patient ID,Sex,Year of Birth,Diagnosis,Component,Cancer Type,Family History +1,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +2,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +3,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +4,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +5,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +6,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +7,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +8,Male,,Healthy,Patient,,"Breast,Colorectal,Lung" +9,Male,,Cancer,Patient,Colorectal,"Breast,Colorectal" +10,Male,,Cancer,Patient,Colorectal,"Breast,Colorectal" +11,Male,,Cancer,Patient,Prostate,"Breast,Colorectal" +12,Male,,Cancer,Patient,Skin,"Breast,Colorectal" \ No newline at end of file diff --git a/tests/data/mock_manifests/TestManifestValidation_test_patient_manifest_invalid.csv b/tests/data/mock_manifests/TestManifestValidation_test_patient_manifest_invalid.csv new file mode 100644 index 000000000..fa5a9abf1 --- /dev/null +++ b/tests/data/mock_manifests/TestManifestValidation_test_patient_manifest_invalid.csv @@ -0,0 +1,11 @@ +Patient ID,Sex,Year of Birth,Diagnosis,Component,Cancer Type,Family History +1,Female,,Healthy,Patient,Random,Random +2,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +3,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +4,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +5,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +6,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +7,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +8,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +9,Female,,Healthy,Patient,Breast,"Colorectal,Breast" +10,Female,,Healthy,Patient,Breast,"Colorectal,Breast" diff --git a/tests/data/mock_manifests/mock_example_biospecimen_manifest.csv b/tests/data/mock_manifests/mock_example_biospecimen_manifest.csv new file mode 100644 index 000000000..c4926b694 --- /dev/null +++ b/tests/data/mock_manifests/mock_example_biospecimen_manifest.csv @@ -0,0 +1,2 @@ +Sample ID,Patient ID,Tissue Status,Component +1,1,Healthy,Biospecimen \ No newline at end of file diff --git a/tests/data/mock_manifests/mock_example_bulkrnaseq_manifest.csv b/tests/data/mock_manifests/mock_example_bulkrnaseq_manifest.csv new file mode 100644 index 000000000..55e840e0e --- /dev/null +++ b/tests/data/mock_manifests/mock_example_bulkrnaseq_manifest.csv @@ -0,0 +1,2 @@ +Filename,Sample ID,File Format,Component,Genome Build,Genome FASTA,entityId +Manifest Submission - Manual test - file-based manifest submission/test-dataset/sample A.txt,1,FASTQ,BulkRNA-seqAssay,,,syn63561932 diff --git 
a/tests/data/test_configs/CLI_test_config.yml b/tests/data/test_configs/CLI_test_config.yml new file mode 100644 index 000000000..ef512ffe3 --- /dev/null +++ b/tests/data/test_configs/CLI_test_config.yml @@ -0,0 +1,8 @@ +# This config is used to change the fileview for certain CLI tests +asset_store: + synapse: + config: "../../../.synapseConfig" + master_fileview_id: 'syn63923487' + +google_sheets: + service_acct_creds: "../../../schematic_service_account_creds.json" diff --git a/tests/data/test_configs/CLI_test_config2.yml b/tests/data/test_configs/CLI_test_config2.yml new file mode 100644 index 000000000..301dc156b --- /dev/null +++ b/tests/data/test_configs/CLI_test_config2.yml @@ -0,0 +1,12 @@ +# This config is used to change the fileview for certain CLI tests +asset_store: + synapse: + config: "../../../.synapseConfig" + master_fileview_id: 'syn51707141' + +manifest: + data_type: + - "MockComponent" + +google_sheets: + service_acct_creds: "../../../schematic_service_account_creds.json" diff --git a/tests/integration/test_commands.py b/tests/integration/test_commands.py new file mode 100644 index 000000000..8658f6e6b --- /dev/null +++ b/tests/integration/test_commands.py @@ -0,0 +1,1378 @@ +"""Tests for CLI commands""" + +import os +import uuid +from io import BytesIO + +import pytest +import requests +from openpyxl import load_workbook +from click.testing import CliRunner +import pandas as pd +import numpy as np + +from schematic.configuration.configuration import Configuration, CONFIG +from schematic.manifest.commands import manifest +from schematic.models.commands import model +from tests.conftest import ConfigurationForTesting + +LIGHT_BLUE = "FFEAF7F9" # Required cell +GRAY = "FFE0E0E0" # Header cell +WHITE = "00000000" # Optional cell + + +@pytest.fixture(name="runner") +def fixture_runner() -> CliRunner: + """Fixture for invoking command-line interfaces.""" + return CliRunner() + + +class TestSubmitCommand: + """ + Tests for: + - The command ran without error + - The output contained a message indicating no validation errors + - The output contained a message indicating the file was submitted + """ + + def test_submit_test_manifest(self, runner: CliRunner) -> None: + """Tests for a successful submission""" + try: + result = runner.invoke( + model, + [ + "--config", + "config_example.yml", + "submit", + "-mp", + "tests/data/mock_manifests/CLI_tests/CLI_biospecimen.csv", + "-vc", + "Biospecimen", + "-mrt", + "table_and_file", + "-d", + "syn23643250", + "--no-file_annotations_upload", + ], + ) + assert os.path.isfile("tests/data/example.Biospecimen.schema.json") + + finally: + os.remove("tests/data/example.Biospecimen.schema.json") + + assert result.exit_code == 0 + assert "No validation errors occured during validation." in result.output + assert ( + "File at 'tests/data/mock_manifests/CLI_tests/CLI_biospecimen.csv' was " + "successfully associated with dataset 'syn23643250'." 
+ ) in result.output + + +class TestValidateCommand: + """Tests the schematic/models/commands validate command""" + + def test_validate_valid_manifest(self, runner: CliRunner) -> None: + """ + Tests for: + - command has no (python) errors, has exit code 0 + - command output has success message + - command output has no validation errors + """ + result = runner.invoke( + model, + [ + "--config", + "config_example.yml", + "validate", + "--manifest_path", + "tests/data/mock_manifests/Valid_Test_Manifest.csv", + "--data_type", + "MockComponent", + "--project_scope", + "syn54126707", + ], + ) + # command has no (python) errors, has exit code 0 + assert result.exit_code == 0 + # command output has success message + assert result.output.split("\n")[4] == ( + "Your manifest has been validated successfully. " + "There are no errors in your manifest, " + "and it can be submitted without any modifications." + ) + # command output has no validation errors + for line in result.output.split("\n")[4]: + assert not line.startswith("error") + + def test_validate_invalid_manifest(self, runner: CliRunner) -> None: + """ + Tests for: + - command has no (python) errors, has exit code 0 + - command output includes error message: 'Random' is not a comma delimited string + - command output includes error message: 'Random' is not one of + """ + result = runner.invoke( + model, + [ + "--config", + "config_example.yml", + "validate", + "--manifest_path", + "tests/data/mock_manifests/CLI_tests/CLI_patient_invalid.csv", + "--data_type", + "Patient", + ], + ) + # command has no (python) errors, has exit code 0 + assert result.exit_code == 0 + # command output includes error message: 'Random' is not a comma delimited string + assert result.output.split("\n")[3] == ( + "error: For attribute Family History in row 2 it does not appear " + "as if you provided a comma delimited string. Please check your entry " + "('Random'') and try again." 
+ ) + # command output includes error message: 'Random' is not one of + # Note: the list of allowed values seems to have a random order so + # is not included in the test + assert result.output.split("\n")[4].startswith("error: 'Random' is not one of") + + +class TestManifestCommand: + """Tests the schematic/manifest/commands validate manifest command""" + + def test_generate_empty_csv_manifests(self, runner: CliRunner) -> None: + """ + Tests for: + - command has no errors, has exit code 0 + - command output has file creation messages for 'Patient' and 'Biospecimen' manifests + - manifest csvs and json schemas were created (then removed) + + """ + try: + # TODO: Set specific paths for csv and json output files with https://sagebionetworks.jira.com/browse/SCHEMATIC-209 + result = runner.invoke(manifest, ["--config", "config_example.yml", "get"]) + + # command has no (python) errors, has exit code 0 + assert result.exit_code == 0 + + biospecimen_df = pd.read_csv("tests/data/example.Biospecimen.manifest.csv") + patient_df = pd.read_csv("tests/data/example.Patient.manifest.csv") + + # Remove created files: + finally: + if os.path.isfile("tests/data/example.Biospecimen.manifest.csv"): + os.remove("tests/data/example.Biospecimen.manifest.csv") + if os.path.isfile("tests/data/example.Patient.manifest.csv"): + os.remove("tests/data/example.Patient.manifest.csv") + if os.path.isfile("tests/data/example.Biospecimen.schema.json"): + os.remove("tests/data/example.Biospecimen.schema.json") + if os.path.isfile("tests/data/example.Patient.schema.json"): + os.remove("tests/data/example.Patient.schema.json") + + # command output has file creation messages for 'Patient' and 'Biospecimen' manifests + assert result.output.split("\n")[7] == ( + "Find the manifest template using this CSV file path: " + "tests/data/example.Biospecimen.manifest.csv" + ) + assert result.output.split("\n")[10] == ( + "Find the manifest template using this CSV file path: " + "tests/data/example.Patient.manifest.csv" + ) + + # manifests have expected columns + assert list(biospecimen_df.columns) == [ + "Sample ID", + "Patient ID", + "Tissue Status", + "Component", + ] + assert list(patient_df.columns) == [ + "Patient ID", + "Sex", + "Year of Birth", + "Diagnosis", + "Component", + "Cancer Type", + "Family History", + ] + # manifests only have one row + assert len(biospecimen_df.index) == 1 + assert len(patient_df.index) == 1 + # manifests are empty except for component column which contains the name of the component + assert biospecimen_df["Component"].to_list() == ["Biospecimen"] + assert patient_df["Component"].to_list() == ["Patient"] + for column in ["Sample ID", "Patient ID", "Tissue Status"]: + assert np.isnan(biospecimen_df[column].to_list()[0]) + for column in [ + "Patient ID", + "Sex", + "Year of Birth", + "Diagnosis", + "Cancer Type", + "Family History", + ]: + assert np.isnan(patient_df[column].to_list()[0]) + + @pytest.mark.manual_verification_required + def test_generate_empty_google_sheet_manifests( + self, + runner: CliRunner, + testing_config: ConfigurationForTesting, + ) -> None: + """ + Tests for: + - command has no errors, has exit code 0 + - command output has file creation messages for 'Patient' and 'Biospecimen' manifest csvs + - command output has file creation messages for 'Patient' and 'Biospecimen' manifest links + + both google sheets: + - have drop downs are populated correctly + - required fields are marked as “light blue”, + - non-required field are marked as white. 
+ - first row comments are 'TBD' + + Patient google sheet: + - first row of 'Family History has its own comment + + Manual tests: + - Open 'CLI_TestManifestCommand_google_sheet_empty_patient.xlsx' + - Confirm 'Diagnosis' column to be 'cancer' in the first row + - Confirm 'Cancer Type' and 'Family History' cells in first row should be light blue. + + """ + try: + # TODO: Set specific paths json output files with https://sagebionetworks.jira.com/browse/SCHEMATIC-209 + result = runner.invoke( + manifest, + [ + "--config", + "config_example.yml", + "get", + "--sheet_url", + "--output_csv", + "CLI_empty_gs.csv", + ], + ) + # command has no errors, has exit code 0 + assert result.exit_code == 0 + + finally: + # Remove created files: + os.remove("CLI_empty_gs.csv") + if os.path.isfile("tests/data/example.Biospecimen.schema.json"): + os.remove("tests/data/example.Biospecimen.schema.json") + if os.path.isfile("tests/data/example.Patient.schema.json"): + os.remove("tests/data/example.Patient.schema.json") + + # command output has file creation messages for 'Patient' and 'Biospecimen' manifest csvs + assert result.output.split("\n")[9] == ( + "Find the manifest template using this CSV file path: " "CLI_empty_gs.csv" + ) + assert result.output.split("\n")[14] == ( + "Find the manifest template using this CSV file path: " "CLI_empty_gs.csv" + ) + + # command output has file creation messages for 'Patient' and 'Biospecimen' manifest links + assert result.output.split("\n")[7] == ( + "Find the manifest template using this Google Sheet URL:" + ) + assert result.output.split("\n")[8].startswith( + "https://docs.google.com/spreadsheets/d/" + ) + assert result.output.split("\n")[12] == ( + "Find the manifest template using this Google Sheet URL:" + ) + assert result.output.split("\n")[13].startswith( + "https://docs.google.com/spreadsheets/d/" + ) + + # Get the google sheet urls form the message + google_sheet_url_biospecimen = result.output.split("\n")[8] + google_sheet_url_patient = result.output.split("\n")[13] + + # Download the Google Sheets content as an Excel file and load into openpyxl + export_url = f"{google_sheet_url_biospecimen}/export?format=xlsx" + response = requests.get(export_url) + assert response.status_code == 200 + content = BytesIO(response.content) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + assert sheet1[f"{columns['Sample ID']}1"].value == "Sample ID" + assert sheet1[f"{columns['Sample ID']}2"].value is None + + assert sheet1[f"{columns['Patient ID']}1"].value == "Patient ID" + assert sheet1[f"{columns['Patient ID']}2"].value is None + + assert sheet1[f"{columns['Tissue Status']}1"].value == "Tissue Status" + assert sheet1[f"{columns['Tissue Status']}2"].value is None + + assert sheet1[f"{columns['Component']}1"].value == "Component" + assert sheet1[f"{columns['Component']}2"].value == "Biospecimen" + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['Component']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each cell in the first row has a comment "TBD" + for col in [ + "Sample ID", + "Patient ID", + "Tissue Status", + "Component", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # drop downs are populated correctly + data_validations = sheet1.data_validations.dataValidation + tissue_status_validation = None + for dv in data_validations: 
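+            # Only the 'Tissue Status' column is expected to carry a dropdown on the
+            # Biospecimen sheet; any data validation not anchored to that column is
+            # unexpected and fails the test via the assert below.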
+ if f"{columns['Tissue Status']}2" in dv.sqref: + tissue_status_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + assert tissue_status_validation is not None + assert tissue_status_validation.type == "list" + assert tissue_status_validation.formula1 == "Sheet2!$C$2:$C$3" + + # required fields are marked as “light blue”, while other non-required fields are marked as white. + for col in ["Sample ID", "Patient ID", "Tissue Status", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + # Download the Google Sheets content as an Excel file and load into openpyxl + export_url = f"{google_sheet_url_patient}/export?format=xlsx" + response = requests.get(export_url) + assert response.status_code == 200 + content = BytesIO(response.content) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert sheet1[f"{columns['Patient ID']}1"].value == "Patient ID" + assert sheet1[f"{columns['Patient ID']}2"].value is None + + assert sheet1[f"{columns['Sex']}1"].value == "Sex" + assert sheet1[f"{columns['Sex']}2"].value is None + + assert sheet1[f"{columns['Year of Birth']}1"].value == "Year of Birth" + assert sheet1[f"{columns['Year of Birth']}2"].value is None + + assert sheet1[f"{columns['Diagnosis']}1"].value == "Diagnosis" + assert sheet1[f"{columns['Diagnosis']}2"].value is None + + assert sheet1[f"{columns['Component']}1"].value == "Component" + assert sheet1[f"{columns['Component']}2"].value == "Patient" + + assert sheet1[f"{columns['Cancer Type']}1"].value == "Cancer Type" + assert sheet1[f"{columns['Cancer Type']}2"].value is None + + assert sheet1[f"{columns['Family History']}1"].value == "Family History" + assert sheet1[f"{columns['Family History']}2"].value is None + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['Family History']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each cell in the first row has a comment "TBD" + for col in [ + "Patient ID", + "Sex", + "Year of Birth", + "Diagnosis", + "Component", + "Cancer Type", + "Family History", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND the comment in "Family History" cell is as expected + assert ( + sheet1[f"{columns['Family History']}2"].comment.text + == "Please enter applicable comma-separated items selected from the set of allowable terms for this attribute. 
See our data standards for allowable terms" + ) + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + sex_validation = None + diagnosis_validation = None + cancer_type_validation = None + for dv in data_validations: + if f"{columns['Sex']}2" in dv.sqref: + sex_validation = dv + continue + elif f"{columns['Diagnosis']}2" in dv.sqref: + diagnosis_validation = dv + continue + elif f"{columns['Cancer Type']}2" in dv.sqref: + cancer_type_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert sex_validation is not None + assert sex_validation.type == "list" + assert sex_validation.formula1 == "Sheet2!$B$2:$B$4" + + assert diagnosis_validation is not None + assert diagnosis_validation.type == "list" + assert diagnosis_validation.formula1 == "Sheet2!$D$2:$D$3" + + assert cancer_type_validation is not None + assert cancer_type_validation.type == "list" + assert cancer_type_validation.formula1 == "Sheet2!$F$2:$F$6" + + # AND the fill colors are as expected + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert sheet1[f"{columns[col]}2"].fill.start_color.index == LIGHT_BLUE + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert sheet1[f"{columns[col]}2"].fill.start_color.index == WHITE + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['Diagnosis']}2"].value = "Cancer" + + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "CLI_TestManifestCommand_google_sheet_empty_patient.xlsx", + ) + ) + + @pytest.mark.manual_verification_required + def test_generate_empty_excel_manifest( + self, testing_config: ConfigurationForTesting, runner: CliRunner + ) -> None: + """ + Tests for: + - command has no errors, has exit code 0 + - command output has excel file creation message + + Tests for google sheet: + - drop downs are populated correctly + - required fields are marked as “light blue”, + while other non-required fields are marked as white. + - first row comments are 'TBD' + - first row of 'Family History has its own comment + + + Manual tests: + - Open 'CLI_TestManifestCommand_excel_empty_patient.xlsx' + - Confirm 'Diagnosis' column to be 'cancer' in the first row: + - Confirm 'Cancer Type' and 'Family History' cells in first row should be light blue. 
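+
+        A quick way to spot-check the generated workbook outside of pytest (the path
+        below is illustrative only and matches the file this test creates and then
+        removes) is:
+
+            from openpyxl import load_workbook
+
+            sheet1 = load_workbook("CLI_empty_excel.xlsx")["Sheet1"]
+            print({cell.value: cell.fill.start_color.index for cell in sheet1[1]})
+            print([dv.formula1 for dv in sheet1.data_validations.dataValidation])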
+ """ + try: + # TODO: Set specific paths for csv and json output files with https://sagebionetworks.jira.com/browse/SCHEMATIC-209 + result = runner.invoke( + manifest, + [ + "--config", + "config_example.yml", + "get", + "--output_xlsx", + "./CLI_empty_excel.xlsx", + ], + ) + + # command has no errors, has exit code 0 + assert result.exit_code == 0 + workbook = load_workbook("CLI_empty_excel.xlsx") + finally: + # Remove created files: + os.remove("CLI_empty_excel.xlsx") + if os.path.isfile("tests/data/example.Patient.schema.json"): + os.remove("tests/data/example.Patient.schema.json") + if os.path.isfile("tests/data/example.Biospecimen.schema.json"): + os.remove("tests/data/example.Biospecimen.schema.json") + + # command output has excel file creation message + assert ( + result.output.split("\n")[7] + == "Find the manifest template using this Excel file path: ./CLI_empty_excel.xlsx" + ) + + sheet1 = workbook["Sheet1"] + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert sheet1[f"{columns['Patient ID']}1"].value == "Patient ID" + assert sheet1[f"{columns['Patient ID']}2"].value is None + + assert sheet1[f"{columns['Sex']}1"].value == "Sex" + assert sheet1[f"{columns['Sex']}2"].value is None + + assert sheet1[f"{columns['Year of Birth']}1"].value == "Year of Birth" + assert sheet1[f"{columns['Year of Birth']}2"].value is None + + assert sheet1[f"{columns['Diagnosis']}1"].value == "Diagnosis" + assert sheet1[f"{columns['Diagnosis']}2"].value is None + + assert sheet1[f"{columns['Component']}1"].value == "Component" + assert sheet1[f"{columns['Component']}2"].value == "Patient" + + assert sheet1[f"{columns['Cancer Type']}1"].value == "Cancer Type" + assert sheet1[f"{columns['Cancer Type']}2"].value is None + + assert sheet1[f"{columns['Family History']}1"].value == "Family History" + assert sheet1[f"{columns['Family History']}2"].value is None + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['Family History']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each cell in the first row has a comment "TBD" + for col in [ + "Patient ID", + "Sex", + "Year of Birth", + "Diagnosis", + "Component", + "Cancer Type", + "Family History", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND the comment in "Family History" cell is as expected + assert ( + sheet1[f"{columns['Family History']}2"].comment.text + == "Please enter applicable comma-separated items selected from the set of allowable terms for this attribute. 
See our data standards for allowable terms" + ) + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + sex_validation = None + diagnosis_validation = None + cancer_type_validation = None + for dv in data_validations: + if f"{columns['Sex']}2" in dv.sqref: + sex_validation = dv + continue + elif f"{columns['Diagnosis']}2" in dv.sqref: + diagnosis_validation = dv + continue + elif f"{columns['Cancer Type']}2" in dv.sqref: + cancer_type_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert sex_validation is not None + assert sex_validation.type == "list" + assert sex_validation.formula1 == "Sheet2!$B$2:$B$4" + + assert diagnosis_validation is not None + assert diagnosis_validation.type == "list" + assert diagnosis_validation.formula1 == "Sheet2!$D$2:$D$3" + + assert cancer_type_validation is not None + assert cancer_type_validation.type == "list" + assert cancer_type_validation.formula1 == "Sheet2!$F$2:$F$6" + + # AND the fill colors are as expected + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert sheet1[f"{columns[col]}2"].fill.start_color.index == LIGHT_BLUE + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert sheet1[f"{columns[col]}2"].fill.start_color.index == WHITE + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['Diagnosis']}2"].value = "Cancer" + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "CLI_TestManifestCommand_excel_empty_patient.xlsx", + ) + ) + + @pytest.mark.manual_verification_required + def test_generate_bulk_rna_google_sheet_manifest( + self, testing_config: ConfigurationForTesting, runner: CliRunner + ) -> None: + """ + Tests for: + - command has no errors, has exit code 0 + - command output has google sheet and csv message + + Tests for google sheet: + - drop downs are populated correctly + - required fields are marked as “light blue”, + while other non-required fields are marked as white. + - first row comments are 'TBD' + + + Manual tests: + - Open 'CLI_TestManifestCommand_google_sheet_bulk_rna.xlsx' + - Confirm 'File Format' column to be 'BAM' in the first row + - Confirm 'Genome Build' cell in first row should be light blue + - Confirm 'File Format' column to be 'CRAM' in the second row: + - Confirm 'Genome Build' and 'Genome FASTA' cells in second row should be light blue. 
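+
+        The assertions below fetch the generated sheet by appending "/export?format=xlsx"
+        to its URL and loading the result with openpyxl; an illustrative sketch of the
+        pattern used below (google_sheet_url is taken from the command output):
+
+            import requests
+            from io import BytesIO
+            from openpyxl import load_workbook
+
+            response = requests.get(f"{google_sheet_url}/export?format=xlsx")
+            workbook = load_workbook(BytesIO(response.content))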
+ """ + try: + # TODO: Set specific paths for json output files with https://sagebionetworks.jira.com/browse/SCHEMATIC-209 + result = runner.invoke( + manifest, + [ + "--config", + "tests/data/test_configs/CLI_test_config.yml", + "get", + "--dataset_id", + "syn63923432", + "--data_type", + "BulkRNA-seqAssay", + "--sheet_url", + "--output_csv", + "./CLI_gs_bulk_rna.csv", + ], + ) + assert result.exit_code == 0 + + finally: + # Remove created files: + os.remove("CLI_gs_bulk_rna.csv") + if os.path.isfile("tests/data/example.BulkRNA-seqAssay.schema.json"): + os.remove("tests/data/example.BulkRNA-seqAssay.schema.json") + + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + # Reset config to it's default values + CONFIG.load_config("config_example.yml") + + assert result.output.split("\n")[7] == ( + "Find the manifest template using this Google Sheet URL:" + ) + assert result.output.split("\n")[8].startswith( + "https://docs.google.com/spreadsheets/d/" + ) + assert result.output.split("\n")[9] == ( + "Find the manifest template using this CSV file path: " + "./CLI_gs_bulk_rna.csv" + ) + + google_sheet_url = result.output.split("\n")[8] + + # Download the Google Sheets content as an Excel file and load into openpyxl + export_url = f"{google_sheet_url}/export?format=xlsx" + response = requests.get(export_url) + assert response.status_code == 200 + content = BytesIO(response.content) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + sheet2 = workbook["Sheet2"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert columns["Filename"] is not None + assert columns["Sample ID"] is not None + assert columns["File Format"] is not None + assert columns["Component"] is not None + assert columns["Genome Build"] is not None + assert columns["Genome FASTA"] is not None + assert columns["entityId"] is not None + + assert sheet1[f"{columns['Filename']}2"].value is None + assert ( + sheet1[f"{columns['Filename']}3"].value + == "Schematic CLI automation resources/TestDataset1/Sample_A.csv" + ) + assert ( + sheet1[f"{columns['Filename']}4"].value + == "Schematic CLI automation resources/TestDataset1/Sample_B.csv" + ) + assert ( + sheet1[f"{columns['Filename']}5"].value + == "Schematic CLI automation resources/TestDataset1/Sample_C.csv" + ) + assert sheet1[f"{columns['Sample ID']}2"].value == 2022 + assert sheet1[f"{columns['Sample ID']}3"].value is None + assert sheet1[f"{columns['Sample ID']}4"].value is None + assert sheet1[f"{columns['Sample ID']}5"].value is None + assert sheet1[f"{columns['File Format']}2"].value == "CSV/TSV" + assert sheet1[f"{columns['File Format']}3"].value is None + assert sheet1[f"{columns['File Format']}4"].value is None + assert sheet1[f"{columns['File Format']}5"].value is None + assert sheet1[f"{columns['Component']}2"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Component']}3"].value is None + assert sheet1[f"{columns['Component']}4"].value is None + assert sheet1[f"{columns['Component']}5"].value is None + assert sheet1[f"{columns['Genome Build']}2"].value == "GRCm38" + assert sheet1[f"{columns['Genome Build']}3"].value is None + assert sheet1[f"{columns['Genome Build']}4"].value is None + assert sheet1[f"{columns['Genome Build']}5"].value is None + assert sheet1[f"{columns['Genome FASTA']}2"].value is None + assert sheet1[f"{columns['Genome FASTA']}3"].value is None + assert sheet1[f"{columns['Genome FASTA']}4"].value is 
None + assert sheet1[f"{columns['Genome FASTA']}5"].value is None + assert sheet1[f"{columns['entityId']}2"].value == "syn28278954" + assert sheet1[f"{columns['entityId']}3"].value == "syn63923439" + assert sheet1[f"{columns['entityId']}4"].value == "syn63923441" + assert sheet1[f"{columns['entityId']}5"].value == "syn63923444" + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['entityId']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each of these cells in the first row has a comment "TBD" + for col in [ + "Filename", + "Sample ID", + "File Format", + "Component", + "Genome Build", + "Genome FASTA", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND each of these cells in the first row do not have a comment + for col in [ + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].comment is None + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + file_format_validation = None + genome_build_validation = None + for dv in data_validations: + if f"{columns['File Format']}2" in dv.sqref: + file_format_validation = dv + continue + elif f"{columns['Genome Build']}2" in dv.sqref: + genome_build_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert file_format_validation is not None + assert file_format_validation.type == "list" + assert ( + file_format_validation.formula1 + == f"Sheet2!${columns['File Format']}$2:${columns['File Format']}$5" + ) + + assert genome_build_validation is not None + assert genome_build_validation.type == "list" + assert ( + genome_build_validation.formula1 + == f"Sheet2!${columns['Genome Build']}$2:${columns['Genome Build']}$5" + ) + + # AND the fill colors are as expected + for col in ["Filename", "Sample ID", "File Format", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in [ + "Genome Build", + "Genome FASTA", + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['File Format']}2"].value = "BAM" + workbook["Sheet1"][f"{columns['File Format']}3"].value = "CRAM" + workbook["Sheet1"][f"{columns['File Format']}4"].value = "FASTQ" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert workbook.sheetnames == ["Sheet1", "Sheet2"] + + # AND the second sheet is hidden + assert sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert sheet2["A1"].value == "Filename" + assert sheet2["B1"].value == "Sample ID" + assert sheet2["C1"].value == "File Format" + assert sheet2["D1"].value == "Component" + assert sheet2["E1"].value == "Genome Build" + assert sheet2["F1"].value == "Genome FASTA" + + assert sheet2["A2"].value is None + assert sheet2["B2"].value is None + assert sheet2["C2"].value == "BAM" + assert sheet2["D2"].value is None + assert sheet2["E2"].value == "GRCh37" + assert sheet2["F2"].value is None + + assert sheet2["A3"].value is None + assert sheet2["B3"].value is None + assert sheet2["C3"].value == "CRAM" + assert sheet2["D3"].value is None + assert sheet2["E3"].value == "GRCh38" + assert sheet2["F3"].value is None + + assert sheet2["A4"].value is None + assert sheet2["B4"].value is None + assert sheet2["C4"].value == "CSV/TSV" + assert 
sheet2["D4"].value is None + assert sheet2["E4"].value == "GRCm38" + assert sheet2["F4"].value is None + + assert sheet2["A5"].value is None + assert sheet2["B5"].value is None + assert sheet2["C5"].value == "FASTQ" + assert sheet2["D5"].value is None + assert sheet2["E5"].value == "GRCm39" + assert sheet2["F5"].value is None + + # AND there are no more columns in the second sheet + assert sheet2["G1"].value is None + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['File Format']}2"].value = "BAM" + workbook["Sheet1"][f"{columns['File Format']}3"].value = "CRAM" + # A copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "CLI_TestManifestCommand_google_sheet_bulk_rna.xlsx", + ) + ) + + @pytest.mark.manual_verification_required + def test_generate_bulk_rna_google_sheet_manifest_with_annotations( + self, testing_config: ConfigurationForTesting, runner: CliRunner + ) -> None: + """ + Tests for: + - command has no errors, has exit code 0 + - command output has google sheet and csv message + + Tests for google sheet: + - drop downs are populated correctly + - required fields are marked as “light blue”, + while other non-required fields are marked as white. + - first row comments are 'TBD' + + + Manual tests: + - Open CLI_TestManifestCommand_google_sheet_bulk_rna_with_annotations_url.txt + - Open the google sheet link in the above file in a browser + - In the first row the File Format column should be txt. Hover over it, and there should be an Invalid error. + - In the second row the File Format column should be csv. Hover over it, and there should be an Invalid error. 
+ """ + try: + # TODO: Set specific paths for json output files with https://sagebionetworks.jira.com/browse/SCHEMATIC-209 + result = runner.invoke( + manifest, + [ + "--config", + "config_example.yml", + "get", + "--dataset_id", + "syn25614635", + "--data_type", + "BulkRNA-seqAssay", + "--sheet_url", + "--use_annotations", + "--output_csv", + "./CLI_gs_bulk_rna_annos.csv", + ], + ) + assert result.exit_code == 0 + finally: + # Remove created files: + if os.path.isfile("tests/data/example.BulkRNA-seqAssay.schema.json"): + os.remove("tests/data/example.BulkRNA-seqAssay.schema.json") + os.remove("./CLI_gs_bulk_rna_annos.csv") + + assert result.output.split("\n")[10] == ( + "Find the manifest template using this Google Sheet URL:" + ) + assert result.output.split("\n")[11].startswith( + "https://docs.google.com/spreadsheets/d/" + ) + assert result.output.split("\n")[12] == ( + "Find the manifest template using this CSV file path: " + "./CLI_gs_bulk_rna_annos.csv" + ) + + google_sheet_url = result.output.split("\n")[11] + + # Download the Google Sheets content as an Excel file and load into openpyxl + export_url = f"{google_sheet_url}/export?format=xlsx" + response = requests.get(export_url) + assert response.status_code == 200 + content = BytesIO(response.content) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + sheet2 = workbook["Sheet2"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert columns["Filename"] is not None + assert columns["Sample ID"] is not None + assert columns["File Format"] is not None + assert columns["Component"] is not None + assert columns["Genome Build"] is not None + assert columns["Genome FASTA"] is not None + assert columns["impact"] is not None + assert columns["author"] is not None + assert columns["eTag"] is not None + assert columns["IsImportantText"] is not None + assert columns["IsImportantBool"] is not None + assert columns["confidence"] is not None + assert columns["date"] is not None + assert columns["Year of Birth"] is not None + assert columns["entityId"] is not None + + assert ( + sheet1[f"{columns['Filename']}2"].value + == "schematic - main/TestDatasets/TestDataset-Annotations-v3/Sample_A.txt" + ) + assert sheet1[f"{columns['Sample ID']}2"].value is None + assert sheet1[f"{columns['File Format']}2"].value == "txt" + assert sheet1[f"{columns['Component']}2"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}2"].value is None + assert sheet1[f"{columns['Genome FASTA']}2"].value is None + assert sheet1[f"{columns['impact']}2"].value is not None + assert sheet1[f"{columns['author']}2"].value is not None + assert sheet1[f"{columns['eTag']}2"].value is not None + assert sheet1[f"{columns['IsImportantText']}2"].value is not None + assert sheet1[f"{columns['IsImportantBool']}2"].value is not None + assert sheet1[f"{columns['confidence']}2"].value is not None + assert sheet1[f"{columns['date']}2"].value is None + assert sheet1[f"{columns['Year of Birth']}2"].value is not None + assert sheet1[f"{columns['entityId']}2"].value is not None + + assert ( + sheet1[f"{columns['Filename']}3"].value + == "schematic - main/TestDatasets/TestDataset-Annotations-v3/Sample_B.txt" + ) + assert sheet1[f"{columns['Sample ID']}3"].value is None + assert sheet1[f"{columns['File Format']}3"].value == "csv" + assert sheet1[f"{columns['Component']}3"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}3"].value is 
None + assert sheet1[f"{columns['Genome FASTA']}3"].value is None + assert sheet1[f"{columns['impact']}3"].value is None + assert sheet1[f"{columns['author']}3"].value is None + assert sheet1[f"{columns['eTag']}3"].value is not None + assert sheet1[f"{columns['IsImportantText']}3"].value is None + assert sheet1[f"{columns['IsImportantBool']}3"].value is None + assert sheet1[f"{columns['confidence']}3"].value is not None + assert sheet1[f"{columns['date']}3"].value is not None + assert sheet1[f"{columns['Year of Birth']}3"].value is None + assert sheet1[f"{columns['entityId']}3"].value is not None + + assert ( + sheet1[f"{columns['Filename']}4"].value + == "schematic - main/TestDatasets/TestDataset-Annotations-v3/Sample_C.txt" + ) + assert sheet1[f"{columns['Sample ID']}4"].value is None + assert sheet1[f"{columns['File Format']}4"].value == "fastq" + assert sheet1[f"{columns['Component']}4"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}4"].value is None + assert sheet1[f"{columns['Genome FASTA']}4"].value is None + assert sheet1[f"{columns['impact']}4"].value is None + assert sheet1[f"{columns['author']}4"].value is None + assert sheet1[f"{columns['eTag']}4"].value is not None + assert sheet1[f"{columns['IsImportantText']}4"].value is not None + assert sheet1[f"{columns['IsImportantBool']}4"].value is not None + assert sheet1[f"{columns['confidence']}4"].value is None + assert sheet1[f"{columns['date']}4"].value is None + assert sheet1[f"{columns['Year of Birth']}4"].value is None + assert sheet1[f"{columns['entityId']}4"].value is not None + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['entityId']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each of these cells in the first row has a comment "TBD" + for col in [ + "Filename", + "Sample ID", + "File Format", + "Component", + "Genome Build", + "Genome FASTA", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND each of these cells in the first row do not have a comment + for col in [ + "impact", + "author", + "eTag", + "IsImportantText", + "IsImportantBool", + "confidence", + "date", + "Year of Birth", + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].comment is None + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + file_format_validation = None + genome_build_validation = None + for dv in data_validations: + if f"{columns['File Format']}2" in dv.sqref: + file_format_validation = dv + continue + elif f"{columns['Genome Build']}2" in dv.sqref: + genome_build_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert file_format_validation is not None + assert file_format_validation.type == "list" + assert ( + file_format_validation.formula1 + == f"Sheet2!${columns['File Format']}$2:${columns['File Format']}$5" + ) + + assert genome_build_validation is not None + assert genome_build_validation.type == "list" + assert ( + genome_build_validation.formula1 + == f"Sheet2!${columns['Genome Build']}$2:${columns['Genome Build']}$5" + ) + + # AND the fill colors are as expected + for col in ["Filename", "Sample ID", "File Format", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in [ + "Genome Build", + "Genome FASTA", + "impact", + "author", + "eTag", + "IsImportantText", + 
"IsImportantBool", + "confidence", + "date", + "Year of Birth", + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['File Format']}2"].value = "BAM" + workbook["Sheet1"][f"{columns['File Format']}3"].value = "CRAM" + workbook["Sheet1"][f"{columns['File Format']}4"].value = "FASTQ" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert workbook.sheetnames == ["Sheet1", "Sheet2"] + + # AND the second sheet is hidden + assert sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert sheet2["A1"].value == "Filename" + assert sheet2["B1"].value == "Sample ID" + assert sheet2["C1"].value == "File Format" + assert sheet2["D1"].value == "Component" + assert sheet2["E1"].value == "Genome Build" + assert sheet2["F1"].value == "Genome FASTA" + + assert sheet2["A2"].value is None + assert sheet2["B2"].value is None + assert sheet2["C2"].value == "BAM" + assert sheet2["D2"].value is None + assert sheet2["E2"].value == "GRCh37" + assert sheet2["F2"].value is None + + assert sheet2["A3"].value is None + assert sheet2["B3"].value is None + assert sheet2["C3"].value == "CRAM" + assert sheet2["D3"].value is None + assert sheet2["E3"].value == "GRCh38" + assert sheet2["F3"].value is None + + assert sheet2["A4"].value is None + assert sheet2["B4"].value is None + assert sheet2["C4"].value == "CSV/TSV" + assert sheet2["D4"].value is None + assert sheet2["E4"].value == "GRCm38" + assert sheet2["F4"].value is None + + assert sheet2["A5"].value is None + assert sheet2["B5"].value is None + assert sheet2["C5"].value == "FASTQ" + assert sheet2["D5"].value is None + assert sheet2["E5"].value == "GRCm39" + assert sheet2["F5"].value is None + + # AND there are no more columns in the second sheet + assert sheet2["G1"].value is None + + # A copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + path = os.path.join( + testing_config.manual_test_verification_path, + "CLI_TestManifestCommand_google_sheet_bulk_rna_with_annotations_url.txt", + ) + with open(path, "w") as f: + f.write(google_sheet_url) + + def test_generate_mock_component_excel_manifest(self, runner: CliRunner) -> None: + """ + Tests for: + - Command has no errors, has exit code 0 + - Command output has excel file message + """ + try: + # TODO: Set specific paths for json output files with https://sagebionetworks.jira.com/browse/SCHEMATIC-209 + result = runner.invoke( + manifest, + [ + "--config", + "tests/data/test_configs/CLI_test_config2.yml", + "get", + "--output_xlsx", + "./CLI_mock_comp.xlsx", + "--dataset_id", + "syn52746566", + ], + ) + + # Command has no errors, has exit code 0 + assert result.exit_code == 0 + workbook = load_workbook("./CLI_mock_comp.xlsx") + finally: + # Remove created files: + if os.path.isfile("tests/data/example.MockComponent.schema.json"): + os.remove("tests/data/example.MockComponent.schema.json") + os.remove("./CLI_mock_comp.xlsx") + + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + # Reset config to it's default values + CONFIG.load_config("config_example.yml") + + # Command output has excel file message + assert result.output.split("\n")[8] == ( + "Find the manifest template using this Excel file path: ./CLI_mock_comp.xlsx" + ) + + sheet1 = workbook["Sheet1"] + # Track column positions + columns = {cell.value: 
cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert sheet1[f"{columns['Component']}1"].value == "Component" + assert sheet1[f"{columns['Component']}2"].value == "MockComponent" + assert sheet1[f"{columns['Component']}3"].value == "MockComponent" + + assert sheet1[f"{columns['Check List']}1"].value == "Check List" + assert sheet1[f"{columns['Check List']}2"].value is not None + assert sheet1[f"{columns['Check List']}3"].value is not None + + assert sheet1[f"{columns['Check Regex List']}1"].value == "Check Regex List" + assert sheet1[f"{columns['Check Regex List']}2"].value is not None + assert sheet1[f"{columns['Check Regex List']}3"].value is not None + + assert sheet1[f"{columns['Check Regex Single']}1"].value == "Check Regex Single" + assert sheet1[f"{columns['Check Regex Single']}2"].value is not None + assert sheet1[f"{columns['Check Regex Single']}3"].value is not None + + assert sheet1[f"{columns['Check Regex Format']}1"].value == "Check Regex Format" + assert sheet1[f"{columns['Check Regex Format']}2"].value is not None + assert sheet1[f"{columns['Check Regex Format']}3"].value is not None + + assert ( + sheet1[f"{columns['Check Regex Integer']}1"].value == "Check Regex Integer" + ) + assert sheet1[f"{columns['Check Regex Integer']}2"].value is not None + assert sheet1[f"{columns['Check Regex Integer']}3"].value is not None + + assert sheet1[f"{columns['Check Num']}1"].value == "Check Num" + assert sheet1[f"{columns['Check Num']}2"].value is not None + assert sheet1[f"{columns['Check Num']}3"].value is not None + + assert sheet1[f"{columns['Check Float']}1"].value == "Check Float" + assert sheet1[f"{columns['Check Float']}2"].value is not None + assert sheet1[f"{columns['Check Float']}3"].value is not None + + assert sheet1[f"{columns['Check Int']}1"].value == "Check Int" + assert sheet1[f"{columns['Check Int']}2"].value is not None + assert sheet1[f"{columns['Check Int']}3"].value is not None + + required_columns = [ + "Component", + "Check List", + "Check List Enum", + "Check List Like", + "Check List Like Enum", + "Check List Strict", + "Check List Enum Strict", + "Check Regex List", + "Check Regex List Strict", + "Check Regex List Like", + "Check Regex Single", + "Check Regex Format", + "Check Regex Integer", + "Check Num", + "Check Float", + "Check Int", + "Check String", + "Check URL", + "Check Match at Least", + "Check Match Exactly", + "Check Match None", + "Check Match at Least values", + "Check Match Exactly values", + "Check Match None values", + "Check Ages", + "Check Unique", + "Check Range", + "Check Date", + "Check NA", + ] + + optional_columns = [ + "Patient ID", + "Sex", + "Diagnosis", + "Cancer Type", + "Family History", + "Sample ID", + "Tissue Status", + "Filename", + "File Format", + "Genome Build", + "Genome FASTA", + "Patient", + "Year of Birth", + "Cancer", + "Biospecimen", + "Bulk RNA-seq Assay", + "BAM", + "CRAM", + "CSV/TSV", + "MockComponent", + "Check Recommended", + "MockRDB", + "MockFilename", + "MockRDB_id", + "SourceManifest", + ] + + # Required columns are light blue + for col in required_columns: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + # Optional columns are in grey + for col in optional_columns: + if col in columns: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + +class TestDownloadManifest: + """Tests the command line interface for downloading a manifest""" + + def test_download_manifest_found( + self, + runner: CliRunner, + config: 
Configuration, + ) -> None: + # GIVEN a manifest name to download as + manifest_name = f"{uuid.uuid4()}" + + # AND a dataset id + dataset_id = "syn23643250" + + # AND a configuration file + config.load_config("config_example.yml") + + # WHEN the download command is run + result = runner.invoke( + cli=manifest, + args=[ + "--config", + config.config_path, + "download", + "--new_manifest_name", + manifest_name, + "--dataset_id", + dataset_id, + ], + ) + + # THEN the command should run successfully + assert result.exit_code == 0 + + # AND the manifest file should be created + expected_manifest_file = os.path.join( + config.manifest_folder, f"{manifest_name}.csv" + ) + assert os.path.exists(expected_manifest_file) + try: + os.remove(expected_manifest_file) + except Exception: + pass + + def test_download_manifest_not_found( + self, + runner: CliRunner, + config: Configuration, + ) -> None: + # GIVEN a manifest name to download as + manifest_name = f"{uuid.uuid4()}" + + # AND a dataset id that does not exist + dataset_id = "syn1234" + + # AND a configuration file + config.load_config("config_example.yml") + + # WHEN the download command is run + result = runner.invoke( + cli=manifest, + args=[ + "--config", + config.config_path, + "download", + "--new_manifest_name", + manifest_name, + "--dataset_id", + dataset_id, + ], + ) + + # THEN the command should not run successfully + assert result.exit_code == 1 + + # AND the manifest file should not be created + expected_manifest_file = os.path.join( + config.manifest_folder, f"{manifest_name}.csv" + ) + assert not os.path.exists(expected_manifest_file) diff --git a/tests/integration/test_manifest_generation.py b/tests/integration/test_manifest_generation.py new file mode 100644 index 000000000..2fe8c4896 --- /dev/null +++ b/tests/integration/test_manifest_generation.py @@ -0,0 +1,1233 @@ +""" +This module is responsible for running through the "Manifest Generation" portion of +the schematic API test plan found here: . + +There are a small number of items that need to be manually verified, and these are +noted in the test function docstrings. +""" +import os +from io import BytesIO + +import pytest +import requests +from flask.testing import FlaskClient +from openpyxl import load_workbook + +from tests.conftest import ConfigurationForTesting +from schematic.configuration.configuration import CONFIG + +LIGHT_BLUE = "FFEAF7F9" # Required cell +GRAY = "FFE0E0E0" # Header cell +WHITE = "00000000" # Optional cell + + +class TestManifestGeneration: + @pytest.mark.manual_verification_required + @pytest.mark.local_or_remote_api + def test_single_manifest_generation_excel( + self, + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + syn_token: str, + ) -> None: + """ + Download a manifest from the Schematic API and verify that it is a valid Excel + file. 
We are validating the following: + + - The first row of the Excel file contains the column headers + - The first row is locked on scroll + - Each cell in the first row has a comment/tooltip "TBD" + - The "Family History" cell has a comment/tooltip with the expected message + - The cell corresponding to "Sex" in Sheet1 has a dropdown list with values from Sheet2!B2:B4 + - The cell corresponding to "Diagnosis" in Sheet1 has a dropdown list with values from Sheet2!D2:D3 + - The cell corresponding to "Cancer Type" in Sheet1 has a dropdown list with values from Sheet2!F2:F6 + - The workbook contains two sheets: "Sheet1" and "Sheet2" + - "Sheet2" is hidden + - The values in "Sheet2" are as expected + - The fill colors of the first row cells are as expected + - Conditional formatting is functioning as expected + + + Manual verification steps: + + - Open the Excel file prefixed with TestManifestGeneration_test_single_manifest_generation_excel.xlsx + - When Diagnosis = "Cancer", [Cancer Type, Family History] is Light Blue (Required) + """ + # GIVEN a valid example manifest to generate + url = f"{testing_config.schematic_api_server_url}/v1/manifest/generate" + params = { + "schema_url": "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld", + "title": "Example", + "data_type": "Patient", + "use_annotations": "false", + "output_format": "excel", + "strict_validation": "true", + "data_model_labels": "class_label", + } + headers = {"accept": "application/json", "Authorization": f"Bearer {syn_token}"} + + # WHEN we make a request to the Schematic API + response = ( + requests.get(url, headers=headers, params=params, timeout=300) + if testing_config.use_deployed_schematic_api_server + else flask_client.get(url, query_string=params, headers=headers) + ) + + # THEN we expect a successful response + assert response.status_code == 200 + + # Load the response content into memory + content = BytesIO( + response.content + if testing_config.use_deployed_schematic_api_server + else response.data + ) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert sheet1[f"{columns['Patient ID']}1"].value == "Patient ID" + assert sheet1[f"{columns['Patient ID']}2"].value is None + + assert sheet1[f"{columns['Sex']}1"].value == "Sex" + assert sheet1[f"{columns['Sex']}2"].value is None + + assert sheet1[f"{columns['Year of Birth']}1"].value == "Year of Birth" + assert sheet1[f"{columns['Year of Birth']}2"].value is None + + assert sheet1[f"{columns['Diagnosis']}1"].value == "Diagnosis" + assert sheet1[f"{columns['Diagnosis']}2"].value is None + + assert sheet1[f"{columns['Component']}1"].value == "Component" + assert sheet1[f"{columns['Component']}2"].value == "Patient" + + assert sheet1[f"{columns['Cancer Type']}1"].value == "Cancer Type" + assert sheet1[f"{columns['Cancer Type']}2"].value is None + + assert sheet1[f"{columns['Family History']}1"].value == "Family History" + assert sheet1[f"{columns['Family History']}2"].value is None + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['Family History']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each cell in the first row has a comment "TBD" + for col in [ + "Patient ID", + "Sex", + "Year of Birth", + "Diagnosis", + "Component", + "Cancer Type", + 
"Family History", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND the comment in "Family History" cell is as expected + assert ( + sheet1[f"{columns['Family History']}2"].comment.text + == "Please enter applicable comma-separated items selected from the set of allowable terms for this attribute. See our data standards for allowable terms" + ) + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + sex_validation = None + diagnosis_validation = None + cancer_type_validation = None + for dv in data_validations: + if f"{columns['Sex']}2" in dv.sqref: + sex_validation = dv + continue + elif f"{columns['Diagnosis']}2" in dv.sqref: + diagnosis_validation = dv + continue + elif f"{columns['Cancer Type']}2" in dv.sqref: + cancer_type_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert sex_validation is not None + assert sex_validation.type == "list" + assert sex_validation.formula1 == "Sheet2!$B$2:$B$4" + + assert diagnosis_validation is not None + assert diagnosis_validation.type == "list" + assert diagnosis_validation.formula1 == "Sheet2!$D$2:$D$3" + + assert cancer_type_validation is not None + assert cancer_type_validation.type == "list" + assert cancer_type_validation.formula1 == "Sheet2!$F$2:$F$6" + + # AND the fill colors are as expected + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert sheet1[f"{columns[col]}2"].fill.start_color.index == LIGHT_BLUE + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert sheet1[f"{columns[col]}2"].fill.start_color.index == WHITE + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['Diagnosis']}2"].value = "Cancer" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert workbook.sheetnames == ["Sheet1", "Sheet2"] + + sheet2 = workbook["Sheet2"] + + # AND the second sheet is hidden + assert sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert sheet2["A1"].value == "Patient ID" + assert sheet2["A2"].value is None + assert sheet2["A3"].value is None + assert sheet2["A4"].value is None + assert sheet2["A5"].value is None + assert sheet2["A6"].value is None + + assert sheet2["B1"].value == "Sex" + assert sheet2["B2"].value == "Female" + assert sheet2["B3"].value == "Male" + assert sheet2["B4"].value == "Other" + assert sheet2["B5"].value is None + assert sheet2["B6"].value is None + + assert sheet2["C1"].value == "Year of Birth" + assert sheet2["C2"].value is None + assert sheet2["C3"].value is None + assert sheet2["C4"].value is None + assert sheet2["C5"].value is None + assert sheet2["C6"].value is None + + assert sheet2["D1"].value == "Diagnosis" + assert sheet2["D2"].value == "Cancer" + assert sheet2["D3"].value == "Healthy" + assert sheet2["D4"].value is None + assert sheet2["D5"].value is None + assert sheet2["D6"].value is None + + assert sheet2["E1"].value == "Component" + assert sheet2["E2"].value is None + assert sheet2["E3"].value is None + assert sheet2["E4"].value is None + assert sheet2["E5"].value is None + assert sheet2["E6"].value is None + + assert 
sheet2["F1"].value == "Cancer Type" + assert sheet2["F2"].value == "Breast" + assert sheet2["F3"].value == "Colorectal" + assert sheet2["F4"].value == "Lung" + assert sheet2["F5"].value == "Prostate" + assert sheet2["F6"].value == "Skin" + + assert sheet2["G1"].value == "Family History" + assert sheet2["G2"].value == "Breast" + assert sheet2["G3"].value == "Colorectal" + assert sheet2["G4"].value == "Lung" + assert sheet2["G5"].value == "Prostate" + assert sheet2["G6"].value == "Skin" + + # AND there are no more columns in the second sheet + assert sheet2["H1"].value is None + + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "TestManifestGeneration_test_single_manifest_generation_excel.xlsx", + ) + ) + + @pytest.mark.manual_verification_required + @pytest.mark.local_or_remote_api + def test_single_manifest_generation_google_sheet_with_annotations( + self, + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + syn_token: str, + ) -> None: + """ + Download a manifest with annotations from the Schematic API and verify that it is a valid Google + Sheet. We are validating the following: + + - The first row of the Google Sheet contains the column headers + - The first row is locked on scroll + - Each cell A-F in the first row has a comment "TBD" + - Each cell G-M in the first row does not have a comment + - The cell corresponding to "File Format" in Sheet1 has a dropdown list with values from Sheet2!C2:C5 + - The cell corresponding to "Genome Build" in Sheet1 has a dropdown list with values from Sheet2!E2:E5 + - The fill colors of the first row cells are as expected + - The workbook contains two sheets: "Sheet1" and "Sheet2" + - "Sheet2" is hidden + - The values in "Sheet1" are as expected + - The values in "Sheet2" are as expected + + Manual verification steps: + - Open the Excel file prefixed with TestManifestGeneration_test_single_manifest_generation_google_sheet_with_annotations.xlsx + - When File Format = "BAM", [Genome Build] is Light Blue (Required) + - When File Format = "CRAM", [Genome Build, Genome FASTA] is Light Blue (Required) + - When File Format = "FASTQ", [Genome Build] is White (Optional) + """ + # GIVEN a valid example manifest to generate + try: + url = f"{testing_config.schematic_api_server_url}/v1/manifest/generate" + params = { + "schema_url": "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld", + "title": "Example", + "data_type": "BulkRNA-seqAssay", + "use_annotations": "true", + "dataset_id": "syn63561056", + "asset_view": "syn63561086", + "output_format": "google_sheet", + "strict_validation": "true", + "data_model_labels": "class_label", + } + headers = { + "accept": "application/json", + "Authorization": f"Bearer {syn_token}", + } + # WHEN we make a request to the Schematic API + response = ( + requests.get(url, headers=headers, params=params, timeout=300) + if testing_config.use_deployed_schematic_api_server + else flask_client.get(url, query_string=params, headers=headers) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN we expect a successful response + assert response.status_code == 200 + + # Load the Google Sheets URL from the response + response_content = ( + response.json() + if 
testing_config.use_deployed_schematic_api_server + else response.json + ) + assert len(response_content) == 1 + google_sheet_url = response_content[0] + assert ( + google_sheet_url is not None + ), "No Google Sheets URL found in the response" + + # Download the Google Sheets content as an Excel file and load into openpyxl + export_url = f"{google_sheet_url}/export?format=xlsx" + response = requests.get(export_url) + assert response.status_code == 200 + content = BytesIO(response.content) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + sheet2 = workbook["Sheet2"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of the first sheet is as expected + assert columns["Filename"] is not None + assert columns["Sample ID"] is not None + assert columns["File Format"] is not None + assert columns["Component"] is not None + assert columns["Genome Build"] is not None + assert columns["Genome FASTA"] is not None + assert columns["eTag"] is not None + assert columns["key_bool"] is not None + assert columns["key_int"] is not None + assert columns["key_float"] is not None + assert columns["key_str"] is not None + assert columns["key_datetime"] is not None + assert columns["entityId"] is not None + + assert ( + sheet1[f"{columns['Filename']}2"].value + == "Manifest generation - Manual test - generate an existing manifest/test dataset/test dataset 1/sample A.txt" + ) + assert sheet1[f"{columns['Sample ID']}2"].value is None + assert sheet1[f"{columns['File Format']}2"].value is None + assert sheet1[f"{columns['Component']}2"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}2"].value is None + assert sheet1[f"{columns['Genome FASTA']}2"].value is None + assert sheet1[f"{columns['eTag']}2"].value is not None # eTag + assert isinstance(sheet1[f"{columns['key_bool']}2"].value, bool) + assert sheet1[f"{columns['key_bool']}2"].value + assert sheet1[f"{columns['key_int']}2"].value == 6 + assert sheet1[f"{columns['key_float']}2"].value == 80 + assert sheet1[f"{columns['key_str']}2"].value == "New Value" + assert sheet1[f"{columns['key_datetime']}2"].value is not None # key_datetime + assert sheet1[f"{columns['entityId']}2"].value == "syn63561081" + + assert ( + sheet1[f"{columns['Filename']}3"].value + == "Manifest generation - Manual test - generate an existing manifest/test dataset/test dataset 2/sample B.txt" + ) + assert sheet1[f"{columns['Sample ID']}3"].value is None + assert sheet1[f"{columns['File Format']}3"].value is None + assert sheet1[f"{columns['Component']}3"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}3"].value is None + assert sheet1[f"{columns['Genome FASTA']}3"].value is None + assert sheet1[f"{columns['eTag']}3"].value is not None # eTag + assert sheet1[f"{columns['key_bool']}3"].value is None + assert sheet1[f"{columns['key_int']}3"].value is None + assert sheet1[f"{columns['key_float']}3"].value is None + assert sheet1[f"{columns['key_str']}3"].value is None + assert sheet1[f"{columns['key_datetime']}3"].value is None + assert sheet1[f"{columns['entityId']}3"].value == "syn63561082" + + assert ( + sheet1[f"{columns['Filename']}4"].value + == "Manifest generation - Manual test - generate an existing manifest/test dataset/test dataset 3/sample C.txt" + ) + assert sheet1[f"{columns['Sample ID']}4"].value is None + assert sheet1[f"{columns['File Format']}4"].value is None + assert sheet1[f"{columns['Component']}4"].value == "BulkRNA-seqAssay" + assert 
sheet1[f"{columns['Genome Build']}4"].value is None + assert sheet1[f"{columns['Genome FASTA']}4"].value is None + assert sheet1[f"{columns['eTag']}4"].value is not None # eTag + assert sheet1[f"{columns['key_bool']}4"].value is None + assert sheet1[f"{columns['key_int']}4"].value is None + assert sheet1[f"{columns['key_float']}4"].value is None + assert sheet1[f"{columns['key_str']}4"].value is None + assert sheet1[f"{columns['key_datetime']}4"].value is None + assert sheet1[f"{columns['entityId']}4"].value == "syn63561085" + + # AND there are no more columns in the first sheet + assert sheet1[f"{columns['entityId']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each of these cells in the first row has a comment "TBD" + for col in [ + "Filename", + "Sample ID", + "File Format", + "Component", + "Genome Build", + "Genome FASTA", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND each of these cells in the first row do not have a comment + for col in [ + "eTag", + "key_bool", + "key_int", + "key_float", + "key_str", + "key_datetime", + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].comment is None + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + file_format_validation = None + genome_build_validation = None + for dv in data_validations: + if f"{columns['File Format']}2" in dv.sqref: + file_format_validation = dv + continue + elif f"{columns['Genome Build']}2" in dv.sqref: + genome_build_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert file_format_validation is not None + assert file_format_validation.type == "list" + assert ( + file_format_validation.formula1 + == f"Sheet2!${columns['File Format']}$2:${columns['File Format']}$5" + ) + + assert genome_build_validation is not None + assert genome_build_validation.type == "list" + assert ( + genome_build_validation.formula1 + == f"Sheet2!${columns['Genome Build']}$2:${columns['Genome Build']}$5" + ) + + # AND the fill colors are as expected + for col in ["Filename", "Sample ID", "File Format", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in [ + "Genome Build", + "Genome FASTA", + "eTag", + "key_bool", + "key_int", + "key_float", + "key_str", + "key_datetime", + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['File Format']}2"].value = "BAM" + workbook["Sheet1"][f"{columns['File Format']}3"].value = "CRAM" + workbook["Sheet1"][f"{columns['File Format']}4"].value = "FASTQ" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert workbook.sheetnames == ["Sheet1", "Sheet2"] + + # AND the second sheet is hidden + assert sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert sheet2["A1"].value == "Filename" + assert sheet2["B1"].value == "Sample ID" + assert sheet2["C1"].value == "File Format" + assert sheet2["D1"].value == "Component" + assert sheet2["E1"].value == "Genome Build" + assert sheet2["F1"].value == "Genome FASTA" + + assert sheet2["A2"].value is None + assert sheet2["B2"].value is None + assert sheet2["C2"].value == "BAM" + assert sheet2["D2"].value is None + assert sheet2["E2"].value == "GRCh37" + assert sheet2["F2"].value 
is None + + assert sheet2["A3"].value is None + assert sheet2["B3"].value is None + assert sheet2["C3"].value == "CRAM" + assert sheet2["D3"].value is None + assert sheet2["E3"].value == "GRCh38" + assert sheet2["F3"].value is None + + assert sheet2["A4"].value is None + assert sheet2["B4"].value is None + assert sheet2["C4"].value == "CSV/TSV" + assert sheet2["D4"].value is None + assert sheet2["E4"].value == "GRCm38" + assert sheet2["F4"].value is None + + assert sheet2["A5"].value is None + assert sheet2["B5"].value is None + assert sheet2["C5"].value == "FASTQ" + assert sheet2["D5"].value is None + assert sheet2["E5"].value == "GRCm39" + assert sheet2["F5"].value is None + + # AND there are no more columns in the second sheet + assert sheet2["G1"].value is None + + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "TestManifestGeneration_test_single_manifest_generation_google_sheet_with_annotations.xlsx", + ) + ) + + @pytest.mark.manual_verification_required + @pytest.mark.local_or_remote_api + def test_single_manifest_generation_google_sheet_no_annotations( + self, + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + syn_token: str, + ) -> None: + """ + Download a manifest without annotations from the Schematic API and verify that it is a valid Google + Sheet. We are validating the following: + + - The first row of the Google Sheet contains the column headers + - The first row is locked on scroll + - Each cell A-F in the first row has a comment "TBD" + - Cell G in the first row does not have a comment + - The second cell in the "File Format" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The second cell in the "Genome Build" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The fill colors of the first row cells are as expected + - The workbook contains two sheets: "Sheet1" and "Sheet2" + - "Sheet2" is hidden + - The values in "Sheet1" are as expected + - The values in "Sheet2" are as expected + + Manual verification steps: + - Open the Excel file prefixed with TestManifestGeneration_test_single_manifest_generation_google_sheet_no_annotations.xlsx + - When File Format = "BAM", [Genome Build] is Light Blue (Required) + - When File Format = "CRAM", [Genome Build, Genome FASTA] is Light Blue (Required) + - When File Format = "FASTQ", [Genome Build] is White (Optional) + """ + try: + url = f"{testing_config.schematic_api_server_url}/v1/manifest/generate" + # GIVEN a valid request to the Schematic API to generate a Google Sheet manifest without annotations + params = { + "schema_url": "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld", + "title": "Example", + "data_type": "BulkRNA-seqAssay", + "use_annotations": "false", + "dataset_id": "syn63561056", + "asset_view": "syn63561086", + "output_format": "google_sheet", + "strict_validation": "true", + "data_model_labels": "class_label", + } + headers = { + "accept": "application/json", + "Authorization": f"Bearer {syn_token}", + } + # WHEN we make a request to the Schematic API + response = ( + requests.get(url, headers=headers, params=params, timeout=300) + if testing_config.use_deployed_schematic_api_server + else flask_client.get(url, query_string=params, headers=headers) + ) + finally: + # Resets the config to its default state + # 
TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN we expect a successful response + assert response.status_code == 200 + + # Load the Google Sheets URL from the response + response_content = ( + response.json() + if testing_config.use_deployed_schematic_api_server + else response.json + ) + assert len(response_content) == 1, "Expected a single URL in the response" + google_sheet_url = response_content[0] + assert ( + google_sheet_url is not None + ), "No Google Sheets URL found in the response" + + # Convert the Google Sheets URL to an export URL for Excel format + export_url = f"{google_sheet_url}/export?format=xlsx" + + # AND we should be able to download the manifest as an Excel file + response = requests.get(export_url) + assert response.status_code == 200 + content = BytesIO(response.content) + workbook = load_workbook(content) + sheet1 = workbook["Sheet1"] + sheet2 = workbook["Sheet2"] + + # Track column positions + columns = {cell.value: cell.column_letter for cell in sheet1[1]} + + # AND the content of sheet1 is as expected + assert columns["Filename"] is not None + assert columns["Sample ID"] is not None + assert columns["File Format"] is not None + assert columns["Component"] is not None + assert columns["Genome Build"] is not None + assert columns["Genome FASTA"] is not None + assert columns["entityId"] is not None + + assert ( + sheet1[f"{columns['Filename']}2"].value + == "Manifest generation - Manual test - generate an existing manifest/test dataset/test dataset 1/sample A.txt" + ) + assert sheet1[f"{columns['Sample ID']}2"].value is None + assert sheet1[f"{columns['File Format']}2"].value is None + assert sheet1[f"{columns['Component']}2"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}2"].value is None + assert sheet1[f"{columns['Genome FASTA']}2"].value is None + assert sheet1[f"{columns['entityId']}2"].value == "syn63561081" + + assert ( + sheet1[f"{columns['Filename']}3"].value + == "Manifest generation - Manual test - generate an existing manifest/test dataset/test dataset 2/sample B.txt" + ) + assert sheet1[f"{columns['Sample ID']}3"].value is None + assert sheet1[f"{columns['File Format']}3"].value is None + assert sheet1[f"{columns['Component']}3"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}3"].value is None + assert sheet1[f"{columns['Genome FASTA']}3"].value is None + assert sheet1[f"{columns['entityId']}3"].value == "syn63561082" + + assert ( + sheet1[f"{columns['Filename']}4"].value + == "Manifest generation - Manual test - generate an existing manifest/test dataset/test dataset 3/sample C.txt" + ) + assert sheet1[f"{columns['Sample ID']}4"].value is None + assert sheet1[f"{columns['File Format']}4"].value is None + assert sheet1[f"{columns['Component']}4"].value == "BulkRNA-seqAssay" + assert sheet1[f"{columns['Genome Build']}4"].value is None + assert sheet1[f"{columns['Genome FASTA']}4"].value is None + assert sheet1[f"{columns['entityId']}4"].value == "syn63561085" + + # AND there are no more columns in the sheet + assert sheet1[f"{columns['entityId']}1"].offset(column=1).value is None + + # AND the first row is locked on scroll + assert sheet1.freeze_panes == "A2" + + # AND each of these cells in the first row has a comment "TBD" + for col in [ + "Filename", + "Sample ID", + "File Format", + "Component", + "Genome Build", + "Genome FASTA", + ]: + assert sheet1[f"{columns[col]}1"].comment.text == "TBD" + + # AND the entityId column 
in the first row does not have a comment + assert sheet1[f"{columns['entityId']}1"].comment is None + + # AND the dropdown lists exist and are as expected + data_validations = sheet1.data_validations.dataValidation + file_format_validation = None + genome_build_validation = None + for dv in data_validations: + if f"{columns['File Format']}2" in dv.sqref: + file_format_validation = dv + continue + elif f"{columns['Genome Build']}2" in dv.sqref: + genome_build_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert file_format_validation is not None + assert file_format_validation.type == "list" + assert ( + file_format_validation.formula1 + == f"Sheet2!${columns['File Format']}$2:${columns['File Format']}$5" + ) + + assert genome_build_validation is not None + assert genome_build_validation.type == "list" + assert ( + genome_build_validation.formula1 + == f"Sheet2!${columns['Genome Build']}$2:${columns['Genome Build']}$5" + ) + + # AND the fill colors are as expected + for col in ["Filename", "Sample ID", "File Format", "Component"]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == LIGHT_BLUE + + for col in [ + "Genome Build", + "Genome FASTA", + "entityId", + ]: + assert sheet1[f"{columns[col]}1"].fill.start_color.index == GRAY + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + workbook["Sheet1"][f"{columns['File Format']}2"].value = "BAM" + workbook["Sheet1"][f"{columns['File Format']}3"].value = "CRAM" + workbook["Sheet1"][f"{columns['File Format']}4"].value = "FASTQ" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert workbook.sheetnames == ["Sheet1", "Sheet2"] + + # AND the second sheet is hidden + assert sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert sheet2["A1"].value == "Filename" + assert sheet2["B1"].value == "Sample ID" + assert sheet2["C1"].value == "File Format" + assert sheet2["D1"].value == "Component" + assert sheet2["E1"].value == "Genome Build" + assert sheet2["F1"].value == "Genome FASTA" + + assert sheet2["A2"].value is None + assert sheet2["B2"].value is None + assert sheet2["C2"].value == "BAM" + assert sheet2["D2"].value is None + assert sheet2["E2"].value == "GRCh37" + assert sheet2["F2"].value is None + + assert sheet2["A3"].value is None + assert sheet2["B3"].value is None + assert sheet2["C3"].value == "CRAM" + assert sheet2["D3"].value is None + assert sheet2["E3"].value == "GRCh38" + assert sheet2["F3"].value is None + + assert sheet2["A4"].value is None + assert sheet2["B4"].value is None + assert sheet2["C4"].value == "CSV/TSV" + assert sheet2["D4"].value is None + assert sheet2["E4"].value == "GRCm38" + assert sheet2["F4"].value is None + + assert sheet2["A5"].value is None + assert sheet2["B5"].value is None + assert sheet2["C5"].value == "FASTQ" + assert sheet2["D5"].value is None + assert sheet2["E5"].value == "GRCm39" + assert sheet2["F5"].value is None + + # AND there are no more columns in the second sheet + assert sheet2["G1"].value is None + + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "TestManifestGeneration_test_single_manifest_generation_google_sheet_no_annotations.xlsx", + ) + ) + + @pytest.mark.manual_verification_required + @pytest.mark.local_or_remote_api + def 
test_manifest_generation_multiple_blank_google_sheets( + self, + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + syn_token: str, + ) -> None: + """ + Download two blank manifests from the Schematic API and verify that they are valid Google Sheets. + We are validating the following: + + For the Patient Google Sheet: + - The first row of the Google Sheet contains the column headers + - The first row is locked on scroll + - Each cell A-G in the first row has a comment "TBD" + - The "Family History" cell has a comment/tooltip with the expected message + - Cell H in the first row does not have a comment + - The "Sex" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The "Diagnosis" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The "Cancer Type" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The "Family History" column in "Sheet1" has a comment that starts with "Please enter applicable comma-separated items" + - The fill colors of the first row cells are as expected + - The workbook contains two sheets: "Sheet1" and "Sheet2" + - The second sheet is hidden + - The values in "Sheet1" are as expected + - The values in "Sheet2" are as expected + + Manual verification steps (Patient Google Sheet): + - Open the Excel file prefixed with TestManifestGeneration_test_multiple_blank_google_sheets_patient.xlsx + - When Diagnosis = "Cancer", [Cancer Type, Family History] are Light Blue (Required) + + For the Bulk RNA-seq Assay Google Sheet: + - The first row of the Google Sheet contains the column headers + - The first row is locked on scroll + - Each cell A-F in the first row has a comment "TBD" + - Each cell G-M in the first row does not have a comment + - The "File Format" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The "Genome Build" column in "Sheet1" has a dropdown list with the correct values from "Sheet2" + - The fill colors of the first row cells are as expected + - The workbook contains two sheets: "Sheet1" and "Sheet2" + - "Sheet2" is hidden + - The values in "Sheet1" are as expected + - The values in "Sheet2" are as expected + + Manual verification steps (Bulk RNA-seq Assay Google Sheet): + - Open the Excel file prefixed with TestManifestGeneration_test_multiple_blank_google_sheets_rna_seq.xlsx + - When File Format = "BAM", [Genome Build] is Light Blue (Required) + - When File Format = "CRAM", [Genome Build, Genome FASTA] is Light Blue (Required) + - When File Format = "FASTQ", [Genome Build] is White (Optional) + """ + url = f"{testing_config.schematic_api_server_url}/v1/manifest/generate" + # GIVEN a valid request to the Schematic API to generate two blank Google Sheets manifests + params = { + "schema_url": "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld", + "title": "Example", + "data_type": "Patient,BulkRNA-seqAssay", + "use_annotations": "false", + "output_format": "google_sheet", + "strict_validation": "true", + "data_model_labels": "class_label", + } + headers = {"accept": "application/json", "Authorization": f"Bearer {syn_token}"} + # WHEN we make a request to the Schematic API + response = ( + requests.get(url, headers=headers, params=params, timeout=300) + if testing_config.use_deployed_schematic_api_server + else flask_client.get(url, query_string=params, headers=headers) + ) + + # THEN we expect a successful response + assert response.status_code == 200 + + # Load the 
Google Sheets URLs from the response + response_content = ( + response.json() + if testing_config.use_deployed_schematic_api_server + else response.json + ) + assert ( + len(response_content) == 2 + ), "Expected two Google Sheets URLs in the response" + google_sheet_urls = response_content + assert ( + google_sheet_urls is not None + ), "No Google Sheets URLs found in the response" + + # Convert the Google Sheets URLs to export URLs for Excel format + export_urls = [f"{url}/export?format=xlsx" for url in google_sheet_urls] + patient_export_url = export_urls[0] + rna_seq_export_url = export_urls[1] + + # AND we should be able to download the patient manifest as an Excel file + patient_response = requests.get(patient_export_url) + assert patient_response.status_code == 200 + patient_content = BytesIO(patient_response.content) + patient_workbook = load_workbook(patient_content) + patient_sheet1 = patient_workbook["Sheet1"] + patient_sheet2 = patient_workbook["Sheet2"] + + # Track column positions + patient_columns = {cell.value: cell.column_letter for cell in patient_sheet1[1]} + + # AND the content of sheet1 is as expected + assert patient_sheet1[f"{patient_columns['Patient ID']}1"].value == "Patient ID" + assert patient_sheet1[f"{patient_columns['Sex']}1"].value == "Sex" + assert ( + patient_sheet1[f"{patient_columns['Year of Birth']}1"].value + == "Year of Birth" + ) + assert patient_sheet1[f"{patient_columns['Diagnosis']}1"].value == "Diagnosis" + assert patient_sheet1[f"{patient_columns['Component']}1"].value == "Component" + assert ( + patient_sheet1[f"{patient_columns['Cancer Type']}1"].value == "Cancer Type" + ) + assert ( + patient_sheet1[f"{patient_columns['Family History']}1"].value + == "Family History" + ) + + assert patient_sheet1[f"{patient_columns['Patient ID']}2"].value is None + assert patient_sheet1[f"{patient_columns['Sex']}2"].value is None + assert patient_sheet1[f"{patient_columns['Year of Birth']}2"].value is None + assert patient_sheet1[f"{patient_columns['Diagnosis']}2"].value is None + assert patient_sheet1[f"{patient_columns['Component']}2"].value == "Patient" + assert patient_sheet1[f"{patient_columns['Cancer Type']}2"].value is None + assert patient_sheet1[f"{patient_columns['Family History']}2"].value is None + + # AND there are no more columns in the first sheet + assert ( + patient_sheet1[f"{patient_columns['Family History']}1"] + .offset(column=1) + .value + is None + ) + + # AND the first row is locked on scroll + assert patient_sheet1.freeze_panes == "A2" + + # AND each cell in the first row has a comment "TBD" + for col in [ + "Patient ID", + "Sex", + "Year of Birth", + "Diagnosis", + "Component", + "Cancer Type", + "Family History", + ]: + assert patient_sheet1[f"{patient_columns[col]}1"].comment.text == "TBD" + + # AND the comment in "Family History" cell is as expected + assert ( + patient_sheet1[f"{patient_columns['Family History']}2"].comment.text + == "Please enter applicable comma-separated items selected from the set of allowable terms for this attribute. 
See our data standards for allowable terms" + ) + + # AND the dropdown lists exist and are as expected + data_validations = patient_sheet1.data_validations.dataValidation + sex_validation = None + diagnosis_validation = None + cancer_type_validation = None + for dv in data_validations: + if f"{patient_columns['Sex']}2" in dv.sqref: + sex_validation = dv + continue + elif f"{patient_columns['Diagnosis']}2" in dv.sqref: + diagnosis_validation = dv + continue + elif f"{patient_columns['Cancer Type']}2" in dv.sqref: + cancer_type_validation = dv + continue + # AND there are no other data validations + assert False, f"Unexpected data validation found: {dv}" + + assert sex_validation is not None + assert sex_validation.type == "list" + assert ( + sex_validation.formula1 + == f"Sheet2!${patient_columns['Sex']}$2:${patient_columns['Sex']}$4" + ) + + assert diagnosis_validation is not None + assert diagnosis_validation.type == "list" + assert ( + diagnosis_validation.formula1 + == f"Sheet2!${patient_columns['Diagnosis']}$2:${patient_columns['Diagnosis']}$3" + ) + + assert cancer_type_validation is not None + assert cancer_type_validation.type == "list" + assert ( + cancer_type_validation.formula1 + == f"Sheet2!${patient_columns['Cancer Type']}$2:${patient_columns['Cancer Type']}$6" + ) + + # AND the fill colors are as expected + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert ( + patient_sheet1[f"{patient_columns[col]}1"].fill.start_color.index + == LIGHT_BLUE + ) + + for col in ["Patient ID", "Sex", "Diagnosis", "Component"]: + assert ( + patient_sheet1[f"{patient_columns[col]}2"].fill.start_color.index + == LIGHT_BLUE + ) + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert ( + patient_sheet1[f"{patient_columns[col]}1"].fill.start_color.index + == GRAY + ) + + for col in ["Year of Birth", "Cancer Type", "Family History"]: + assert ( + patient_sheet1[f"{patient_columns[col]}2"].fill.start_color.index + == WHITE + ) + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + patient_workbook["Sheet1"][f"{patient_columns['Diagnosis']}2"].value = "Cancer" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert patient_workbook.sheetnames == ["Sheet1", "Sheet2"] + + # AND the second sheet is hidden + assert patient_sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert patient_sheet2["A1"].value == "Patient ID" + assert patient_sheet2["A2"].value is None + assert patient_sheet2["A3"].value is None + assert patient_sheet2["A4"].value is None + assert patient_sheet2["A5"].value is None + assert patient_sheet2["A6"].value is None + + assert patient_sheet2["B1"].value == "Sex" + assert patient_sheet2["B2"].value == "Female" + assert patient_sheet2["B3"].value == "Male" + assert patient_sheet2["B4"].value == "Other" + assert patient_sheet2["B5"].value is None + assert patient_sheet2["B6"].value is None + + assert patient_sheet2["C1"].value == "Year of Birth" + assert patient_sheet2["C2"].value is None + assert patient_sheet2["C3"].value is None + assert patient_sheet2["C4"].value is None + assert patient_sheet2["C5"].value is None + assert patient_sheet2["C6"].value is None + + assert patient_sheet2["D1"].value == "Diagnosis" + assert patient_sheet2["D2"].value == "Cancer" + assert patient_sheet2["D3"].value == "Healthy" + assert patient_sheet2["D4"].value is None + assert patient_sheet2["D5"].value is None + assert patient_sheet2["D6"].value is None + + assert patient_sheet2["E1"].value 
== "Component" + assert patient_sheet2["E2"].value is None + assert patient_sheet2["E3"].value is None + assert patient_sheet2["E4"].value is None + assert patient_sheet2["E5"].value is None + assert patient_sheet2["E6"].value is None + + assert patient_sheet2["F1"].value == "Cancer Type" + assert patient_sheet2["F2"].value == "Breast" + assert patient_sheet2["F3"].value == "Colorectal" + assert patient_sheet2["F4"].value == "Lung" + assert patient_sheet2["F5"].value == "Prostate" + assert patient_sheet2["F6"].value == "Skin" + + assert patient_sheet2["G1"].value == "Family History" + assert patient_sheet2["G2"].value == "Breast" + assert patient_sheet2["G3"].value == "Colorectal" + assert patient_sheet2["G4"].value == "Lung" + assert patient_sheet2["G5"].value == "Prostate" + assert patient_sheet2["G6"].value == "Skin" + + # AND there are no more columns in the second sheet + assert patient_sheet2["H1"].value is None + + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + patient_workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "TestManifestGeneration_test_multiple_blank_google_sheets_patient.xlsx", + ) + ) + + # AND we should be able to download the Bulk RNA-seq assay manifest as an Excel file + rna_seq_response = requests.get(rna_seq_export_url) + assert rna_seq_response.status_code == 200 + rna_seq_content = BytesIO(rna_seq_response.content) + rna_seq_workbook = load_workbook(rna_seq_content) + rna_seq_sheet1 = rna_seq_workbook["Sheet1"] + rna_seq_sheet2 = rna_seq_workbook["Sheet2"] + + # Track column positions + rna_seq_columns = {cell.value: cell.column_letter for cell in rna_seq_sheet1[1]} + + # AND the content of "Sheet1" is as expected + assert rna_seq_columns["Filename"] is not None + assert rna_seq_columns["Sample ID"] is not None + assert rna_seq_columns["File Format"] is not None + assert rna_seq_columns["Component"] is not None + assert rna_seq_columns["Genome Build"] is not None + assert rna_seq_columns["Genome FASTA"] is not None + + assert rna_seq_sheet1[f"{rna_seq_columns['Filename']}2"].value is None + assert rna_seq_sheet1[f"{rna_seq_columns['Sample ID']}2"].value is None + assert rna_seq_sheet1[f"{rna_seq_columns['File Format']}2"].value is None + assert ( + rna_seq_sheet1[f"{rna_seq_columns['Component']}2"].value + == "BulkRNA-seqAssay" + ) + assert rna_seq_sheet1[f"{rna_seq_columns['Genome Build']}2"].value is None + assert rna_seq_sheet1[f"{rna_seq_columns['Genome FASTA']}2"].value is None + + # AND there are no more columns in the sheet + assert ( + rna_seq_sheet1[f"{rna_seq_columns['Genome FASTA']}1"].offset(column=1).value + is None + ) + + # AND the first row is locked on scroll + assert rna_seq_sheet1.freeze_panes == "A2" + + # AND each cell in the first row has a comment "TBD" + for col in [ + "Filename", + "Sample ID", + "File Format", + "Component", + "Genome Build", + "Genome FASTA", + ]: + assert rna_seq_sheet1[f"{rna_seq_columns[col]}1"].comment.text == "TBD" + + # AND the dropdown lists exist and are as expected + data_validations = rna_seq_sheet1.data_validations.dataValidation + file_format_validation = None + genome_build_validation = None + for dv in data_validations: + if f"{rna_seq_columns['File Format']}2" in dv.sqref: + file_format_validation = dv + continue + elif f"{rna_seq_columns['Genome Build']}2" in dv.sqref: + genome_build_validation = dv + continue + # AND there are no other data validations + assert False, 
f"Unexpected data validation found: {dv}" + + assert file_format_validation is not None + assert file_format_validation.type == "list" + assert ( + file_format_validation.formula1 + == f"Sheet2!${rna_seq_columns['File Format']}$2:${rna_seq_columns['File Format']}$5" + ) + + assert genome_build_validation is not None + assert genome_build_validation.type == "list" + assert ( + genome_build_validation.formula1 + == f"Sheet2!${rna_seq_columns['Genome Build']}$2:${rna_seq_columns['Genome Build']}$5" + ) + + # AND the fill colors are as expected + for col in ["Filename", "Sample ID", "File Format", "Component"]: + assert ( + rna_seq_sheet1[f"{rna_seq_columns[col]}1"].fill.start_color.index + == LIGHT_BLUE + ) + + for col in [ + "Genome Build", + "Genome FASTA", + ]: + assert ( + rna_seq_sheet1[f"{rna_seq_columns[col]}1"].fill.start_color.index + == GRAY + ) + assert ( + rna_seq_sheet1[f"{rna_seq_columns[col]}2"].fill.start_color.index + == WHITE + ) + + # AND conditional formatting is functioning as expected (MANUAL VERIFICATION) + rna_seq_workbook["Sheet1"][f"{rna_seq_columns['File Format']}2"].value = "BAM" + rna_seq_workbook["Sheet1"][f"{rna_seq_columns['File Format']}3"].value = "CRAM" + rna_seq_workbook["Sheet1"][f"{rna_seq_columns['File Format']}4"].value = "FASTQ" + + # AND the workbook contains two sheets: "Sheet1" and "Sheet2" + assert rna_seq_workbook.sheetnames == ["Sheet1", "Sheet2"] + + # AND the second sheet is hidden + assert rna_seq_sheet2.sheet_state == "hidden" + + # AND the values in "Sheet2" are as expected + assert rna_seq_sheet2["A1"].value == "Filename" + assert rna_seq_sheet2["B1"].value == "Sample ID" + assert rna_seq_sheet2["C1"].value == "File Format" + assert rna_seq_sheet2["D1"].value == "Component" + assert rna_seq_sheet2["E1"].value == "Genome Build" + assert rna_seq_sheet2["F1"].value == "Genome FASTA" + + assert rna_seq_sheet2["A2"].value is None + assert rna_seq_sheet2["B2"].value is None + assert rna_seq_sheet2["C2"].value == "BAM" + assert rna_seq_sheet2["D2"].value is None + assert rna_seq_sheet2["E2"].value == "GRCh37" + assert rna_seq_sheet2["F2"].value is None + + assert rna_seq_sheet2["A3"].value is None + assert rna_seq_sheet2["B3"].value is None + assert rna_seq_sheet2["C3"].value == "CRAM" + assert rna_seq_sheet2["D3"].value is None + assert rna_seq_sheet2["E3"].value == "GRCh38" + assert rna_seq_sheet2["F3"].value is None + + assert rna_seq_sheet2["A4"].value is None + assert rna_seq_sheet2["B4"].value is None + assert rna_seq_sheet2["C4"].value == "CSV/TSV" + assert rna_seq_sheet2["D4"].value is None + assert rna_seq_sheet2["E4"].value == "GRCm38" + assert rna_seq_sheet2["F4"].value is None + + assert rna_seq_sheet2["A5"].value is None + assert rna_seq_sheet2["B5"].value is None + assert rna_seq_sheet2["C5"].value == "FASTQ" + assert rna_seq_sheet2["D5"].value is None + assert rna_seq_sheet2["E5"].value == "GRCm39" + assert rna_seq_sheet2["F5"].value is None + + # And there are no more columns in the second sheet + assert rna_seq_sheet2["G1"].value is None + + # AND a copy of the Excel file is saved to the test directory for manual verification + if testing_config.manual_test_verification_enabled: + rna_seq_workbook.save( + os.path.join( + testing_config.manual_test_verification_path, + "TestManifestGeneration_test_multiple_blank_google_sheets_rna_seq.xlsx", + ) + ) diff --git a/tests/integration/test_manifest_submission.py b/tests/integration/test_manifest_submission.py new file mode 100644 index 000000000..92e6911c1 --- /dev/null +++ 
b/tests/integration/test_manifest_submission.py @@ -0,0 +1,1166 @@ +import logging +import os +import tempfile +import uuid +from typing import Any, Callable, Dict + +import pandas as pd +import pytest +import requests +from flask.testing import FlaskClient +from synapseclient.client import Synapse + +from schematic.configuration.configuration import CONFIG +from schematic.store.synapse import SynapseStorage +from tests.conftest import ConfigurationForTesting, Helpers +from tests.utils import CleanupItem + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +DATA_MODEL_JSON_LD = "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld" + + +@pytest.fixture +def request_headers(syn_token: str) -> Dict[str, str]: + """Simple bearer token header for requests""" + headers = {"Authorization": "Bearer " + syn_token} + return headers + + +class TestManifestSubmission: + def validate_submitted_manifest_file( + self, + response: Any, + syn: Synapse, + download_location: str, + data_type: str, + schedule_for_cleanup: Callable[[CleanupItem], None], + ) -> None: + """ + Validates the manifest by downloading it, checking its properties, and ensuring the correct columns. + + Args: + response (Any): The response containing the manifest ID. + syn (Synapse): An instance of the Synapse client. + data_type (str): The data type used in manifest. + download_location (str): path to download location + schedule_for_cleanup (Callable[[CleanupItem], None]): Returns a closure that takes an item that should be scheduled for cleanup. + testing_config (ConfigurationForTesting): Configuration for testing + """ + # Get the manifest ID from the response + try: + manifest_id = response.json() + except (ValueError, TypeError): + manifest_id = response.json + + # clean up + schedule_for_cleanup(CleanupItem(manifest_id)) + + # Load the manifest from synapse + manifest_data = syn.get( + manifest_id, + downloadLocation=download_location, + ifcollision="overwrite.local", + ) + # make sure that the file name of manifest is correct + assert ( + manifest_data["properties"]["name"] + == (f"synapse_storage_manifest_{data_type}.csv").lower() + ) + + # make sure that entity id and id columns were added + manifest_file_path = os.path.join( + download_location, manifest_data["properties"]["name"] + ) + manifest_submitted_df = pd.read_csv(manifest_file_path) + assert "entityId" in manifest_submitted_df.columns + assert "Id" in manifest_submitted_df.columns + + # make sure that Id column is not empty + assert manifest_submitted_df["Id"].notnull().all() + + def validate_submitted_manifest_table( + self, + syn: Synapse, + project_id: str, + data_type: str, + ) -> None: + """ + Validates the manifest table by checking if it was created in the parent project. + + Args: + syn (Synapse): An instance of the Synapse client. + project_id (str): The project ID where the table should be created. + data_type (str): The data type used in manifest. 
+ """ + expected_table_name = f"{data_type}_synapse_storage_manifest_table".lower() + synapse_id = syn.findEntityId(parent=project_id, name=expected_table_name) + assert synapse_id is not None + + @pytest.mark.local_or_remote_api + def test_submit_record_based_test_manifest_file_only( + self, + helpers: Helpers, + download_location: str, + syn: Synapse, + schedule_for_cleanup: Callable[[CleanupItem], None], + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + request_headers: Dict[str, str], + ) -> None: + """Test that a record-based manifest can be submitted with the file_only and replace option + + Args: + helpers (Helpers): a pytest fixture + syn_token (str): synapse access token + syn (Synapse): synapse client + download_location (str): path to download location + schedule_for_cleanup (Callable[[CleanupItem], None]): Returns a closure that takes an item that should be scheduled for cleanup. + testing_config (ConfigurationForTesting): Confiugration for testing + flask_client (FlaskClient): Local flask client to use instead of API server. + request_headers (Dict[str, str]): Headers to use for the request + + We are validating the following: + - The submitted manifest has correct file name: synapse_storage_manifest_.csv + - The submitted manifest has column entityId and Id + - The submitted manifest has Id column that is not empty + """ + + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + data_type = "Biospecimen" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_model_labels": "class_label", + "data_type": data_type, + "dataset_id": "syn63561474", + "manifest_record_type": "file_only", + "restrict_rules": "false", + "hide_blanks": "false", + "asset_view": "syn63561606", + "table_manipulation": "replace", + "table_column_names": "class_label", + "annotation_keys": "class_label", + "file_annotations_upload": "false", + } + + test_manifest_path = helpers.get_data_path( + "mock_manifests/mock_example_biospecimen_manifest.csv" + ) + + # THEN we expect a successful response + try: + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_manifest_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(test_manifest_path, "rb")}, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + assert response.status_code == 200 + self.validate_submitted_manifest_file( + response=response, + syn=syn, + data_type=data_type, + download_location=download_location, + schedule_for_cleanup=schedule_for_cleanup, + ) + + @pytest.mark.slow_test + @pytest.mark.local_or_remote_api + def test_submit_record_based_test_manifest_table_and_file( + self, + helpers: Helpers, + syn: Synapse, + download_location: str, + schedule_for_cleanup: Callable[[CleanupItem], None], + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + request_headers: Dict[str, str], + ) -> None: + """Test that a record-based manifest can be submitted with the table and file and replace option + + Args: + helpers (Helpers): a pytest fixture + syn (Synapse): synapse client + syn_token (str): synapse access token + download_location (str): path to download location + schedule_for_cleanup (Callable[[CleanupItem], None]): Returns a closure that takes an item that 
should be scheduled for cleanup. + testing_config (ConfigurationForTesting): Configuration for testing + flask_client (FlaskClient): Local flask client to use instead of API server. + request_headers (Dict[str, str]): Headers to use for the request + + We are validating the following: + - The submitted manifest has correct file name: synapse_storage_manifest_.csv + - The submitted manifest has column entityId and Id + - The submitted manifest has Id column that is not empty + - The table gets created in the parent synapse project + """ + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + data_type = "Biospecimen" + project_id = "syn63561415" + dataset_id = "syn63561474" + asset_view = "syn63561606" + + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_model_labels": "class_label", + "data_type": data_type, + "dataset_id": dataset_id, + "manifest_record_type": "table_and_file", + "restrict_rules": "false", + "hide_blanks": "false", + "asset_view": asset_view, + "table_column_names": "class_label", + "annotation_keys": "class_label", + "file_annotations_upload": "false", + } + + test_manifest_path = helpers.get_data_path( + "mock_manifests/mock_example_biospecimen_manifest.csv" + ) + + # THEN we expect a successful response + try: + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_manifest_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(test_manifest_path, "rb")}, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + assert response.status_code == 200 + self.validate_submitted_manifest_file( + response=response, + syn=syn, + data_type=data_type, + download_location=download_location, + schedule_for_cleanup=schedule_for_cleanup, + ) + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + def test_submit_file_based_test_manifest_file_only( + self, + helpers: Helpers, + download_location: str, + schedule_for_cleanup: Callable[[CleanupItem], None], + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + syn: Synapse, + request_headers: Dict[str, str], + ) -> None: + """Test that a file-based manifest can be submitted with the file_only and replace option + + Args: + helpers (Helpers): Utilities for testing + download_location (str): path to download location + schedule_for_cleanup (Callable[[CleanupItem], None]): Returns a closure that takes an item that should be scheduled for cleanup. + testing_config (ConfigurationForTesting): Configuration for testing + flask_client (FlaskClient): Local flask client to use instead of API server. 
+ syn (Synapse): synapse client + request_headers (Dict[str, str]): Headers to use for the request + + We are validating the following: + - The submitted manifest has correct file name: synapse_storage_manifest_.csv + - The submitted manifest has column entityId and Id + - The submitted manifest has Id column that is not empty + """ + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + data_type = "BulkRNA-seqAssay" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_model_labels": "class_label", + "data_type": data_type, + "dataset_id": "syn63561911", + "manifest_record_type": "file_only", + "restrict_rules": "false", + "hide_blanks": "false", + "asset_view": "syn63561920", + "table_manipulation": "replace", + "table_column_names": "class_label", + "annotation_keys": "class_label", + "file_annotations_upload": "false", + } + + test_manifest_path = helpers.get_data_path( + "mock_manifests/mock_example_bulkrnaseq_manifest.csv" + ) + + # THEN we expect a successful response + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_manifest_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(test_manifest_path, "rb")}, + ) + ) + + assert response.status_code == 200 + self.validate_submitted_manifest_file( + response=response, + syn=syn, + data_type=data_type, + download_location=download_location, + schedule_for_cleanup=schedule_for_cleanup, + ) + + @pytest.mark.local_or_remote_api + def test_submit_file_based_test_manifest_table_and_file( + self, + helpers: Helpers, + syn: Synapse, + download_location: str, + schedule_for_cleanup: Callable[[CleanupItem], None], + testing_config: ConfigurationForTesting, + flask_client: FlaskClient, + request_headers: Dict[str, str], + ) -> None: + """Test that a file-based manifest can be submitted with the table and file and replace option + + Args: + helpers (Helpers): a pytest fixture + syn (Synapse): synapse client + syn_token (str): synapse access token + download_location (str): path to download location + schedule_for_cleanup (Callable[[CleanupItem], None]): Returns a closure that takes an item that should be scheduled for cleanup. + testing_config (ConfigurationForTesting): Confiugration for testing + flask_client (FlaskClient): Local flask client to use instead of API server. 
+ request_headers (Dict[str, str]): Headers to use for the request + + We are validating the following: + - The submitted manifest has correct file name: synapse_storage_manifest_.csv + - The submitted manifest has column entityId and Id + - The submitted manifest has Id column that is not empty + - The table gets created in the parent synapse project + """ + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + data_type = "BulkRNA-seqAssay" + project_id = "syn63561904" + dataset_id = "syn63561911" + asset_view = "syn63561920" + + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_model_labels": "class_label", + "data_type": data_type, + "dataset_id": dataset_id, + "manifest_record_type": "table_and_file", + "restrict_rules": "false", + "hide_blanks": "false", + "asset_view": asset_view, + "table_column_names": "class_label", + "annotation_keys": "class_label", + "file_annotations_upload": "false", + } + + test_manifest_path = helpers.get_data_path( + "mock_manifests/mock_example_bulkrnaseq_manifest.csv" + ) + + # THEN we expect a successful response + try: + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_manifest_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(test_manifest_path, "rb")}, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + assert response.status_code == 200 + self.validate_submitted_manifest_file( + response=response, + syn=syn, + data_type=data_type, + download_location=download_location, + schedule_for_cleanup=schedule_for_cleanup, + ) + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_nested_manifest_table_and_file_replace( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + synapse_store: SynapseStorage, + testing_config: ConfigurationForTesting, + ) -> None: + """ + Testing submit manifest in a csv format as a table and a file. 
+ + We are validating the following: + - The submission should be successful + - The file should be uploaded to Synapse with the new annotation + - The manifest should exist in the dataset folder + - The manifest table is created + - Submission works for a nested manifest + """ + # GIVEN the parameters to submit a manifest + data_type = "BulkRNA-seqAssay" + project_id = "syn23643250" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "restrict_rules": False, + "manifest_record_type": "table_and_file", + "asset_view": "syn63646213", + "dataset_id": "syn63646197", + "table_manipulation": "replace", + "data_model_labels": "class_label", + "table_column_names": "display_name", + } + + # AND a test manifest with a nested file entity + nested_manifest_replace_csv = helpers.get_data_path( + "mock_manifests/TestManifestOperation_test_submit_nested_manifest_table_and_file_replace.csv" + ) + + # AND a randomized annotation we can verify was added + df = helpers.get_data_frame(path=nested_manifest_replace_csv) + randomized_annotation_content = str(uuid.uuid4()) + df["RandomizedAnnotation"] = randomized_annotation_content + + with tempfile.NamedTemporaryFile(delete=True, suffix=".csv") as tmp_file: + # Write the DF to a temporary file + df.to_csv(tmp_file.name, index=False) + + # WHEN I submit that manifest + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(tmp_file.name, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(tmp_file.name, "rb")}, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the submission should be successful + assert response_csv.status_code == 200 + + # AND the file should be uploaded to Synapse with the new annotation + modified_file = synapse_store.syn.get(df["entityId"][0], downloadFile=False) + assert modified_file is not None + assert modified_file["RandomizedAnnotation"][0] == randomized_annotation_content + + # AND the manifest should exist in the dataset folder + manifest_synapse_id = synapse_store.syn.findEntityId( + name="synapse_storage_manifest_bulkrna-seqassay.csv", parent="syn63646197" + ) + assert manifest_synapse_id is not None + synapse_manifest_entity = synapse_store.syn.get( + entity=manifest_synapse_id, downloadFile=False + ) + assert synapse_manifest_entity is not None + assert ( + synapse_manifest_entity["_file_handle"]["fileName"] + == "synapse_storage_manifest_bulkrna-seqassay.csv" + ) + + # AND the manifest table is created + self.validate_submitted_manifest_table( + syn=synapse_store.syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_manifest_table_and_file_replace( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a table and a file. Only replace + the table. 
+ + We are validating the following: + - The submission should be successful + - The manifest table is created + """ + # GIVEN the parameters to submit a manifest + data_type = "Biospecimen" + project_id = "syn23643250" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "restrict_rules": False, + "hide_blanks": False, + "manifest_record_type": "table_and_file", + "asset_view": "syn51514344", + "dataset_id": "syn51514345", + "table_manipulation": "replace", + "data_model_labels": "class_label", + "table_column_names": "class_label", + } + + # AND a test manifest + test_manifest_submit = helpers.get_data_path( + "mock_manifests/example_biospecimen_test.csv" + ) + + # WHEN I submit that manifest + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_manifest_submit, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={"file_name": (open(test_manifest_submit, "rb"), "test.csv")}, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the submission should be successful + assert response_csv.status_code == 200 + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + @pytest.mark.parametrize( + "data_type", + [ + ("Biospecimen"), + ("MockComponent"), + ], + ) + def test_submit_manifest_file_only_replace( + self, + helpers: Helpers, + flask_client: FlaskClient, + request_headers: Dict[str, str], + data_type: str, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a file. 
+ + We are validating the following: + - The submission should be successful + - The manifest table is created + """ + # GIVEN a test manifest + if data_type == "Biospecimen": + manifest_path = helpers.get_data_path( + "mock_manifests/example_biospecimen_test.csv" + ) + elif data_type == "MockComponent": + manifest_path = helpers.get_data_path( + "mock_manifests/Valid_Test_Manifest.csv" + ) + + # AND the parameters to submit a manifest + project_id = "syn23643250" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "restrict_rules": False, + "manifest_record_type": "file_only", + "table_manipulation": "replace", + "data_model_labels": "class_label", + "table_column_names": "class_label", + } + + if data_type == "Biospecimen": + specific_params = { + "asset_view": "syn51514344", + "dataset_id": "syn51514345", + } + + elif data_type == "MockComponent": + python_version = helpers.get_python_version() + + if python_version == "3.10": + dataset_id = "syn52656106" + elif python_version == "3.9": + dataset_id = "syn52656104" + + specific_params = { + "asset_view": "syn23643253", + "dataset_id": dataset_id, + "project_scope": ["syn54126707"], + } + + params.update(specific_params) + + # WHEN I submit that manifest + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(manifest_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={"file_name": (open(manifest_path, "rb"), "test.csv")}, + headers=request_headers, + ) + ) + + # THEN the submission should be successful + assert response_csv.status_code == 200 + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_manifest_json_str_replace( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Submit json str as a file. 
+ + + We are validating the following: + - The submission should be successful + - The manifest table is created + """ + # GIVEN a test json str + json_str = '[{"Sample ID": 123, "Patient ID": 1,"Tissue Status": "Healthy","Component": "Biospecimen"}]' + + # AND the parameters to submit a manifest + project_id = "syn23643250" + data_type = "Biospecimen" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "json_str": json_str, + "restrict_rules": False, + "manifest_record_type": "file_only", + "asset_view": "syn51514344", + "dataset_id": "syn51514345", + "table_manipulation": "replace", + "data_model_labels": "class_label", + "table_column_names": "class_label", + } + params["json_str"] = json_str + + # WHEN I submit that manifest + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": ""}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={"file_name": ""}, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the submission should be successful + assert response.status_code == 200 + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_manifest_w_file_and_entities( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a file and entities. 
+ + + We are validating the following: + - The submission should be successful + - The manifest table is created + """ + # GIVEN the parameters to submit a manifest + project_id = "syn23643250" + data_type = "Biospecimen" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "restrict_rules": False, + "manifest_record_type": "file_and_entities", + "asset_view": "syn51514501", + "dataset_id": "syn51514523", + "table_manipulation": "replace", + "data_model_labels": "class_label", + "table_column_names": "class_label", + "annotation_keys": "class_label", + } + test_manifest_submit = helpers.get_data_path( + "mock_manifests/example_biospecimen_test.csv" + ) + + # WHEN I submit that manifest + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_manifest_submit, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={"file_name": (open(test_manifest_submit, "rb"), "test.csv")}, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the submission should be successful + assert response_csv.status_code == 200 + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_manifest_table_and_file_upsert( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a table and a file. Upsert + the table. 
+ + + We are validating the following: + - The submission should be successful + - The manifest table is created + """ + # GIVEN the parameters to submit a manifest + project_id = "syn23643250" + data_type = "MockRDB" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "restrict_rules": False, + "manifest_record_type": "table_and_file", + "asset_view": "syn51514557", + "dataset_id": "syn51514551", + "table_manipulation": "upsert", + "data_model_labels": "class_label", + # have to set table_column_names to display_name to ensure upsert feature works + "table_column_names": "display_name", + } + + # AND a test manifest + test_upsert_manifest_csv = helpers.get_data_path( + "mock_manifests/rdb_table_manifest.csv" + ) + + # WHEN I submit that manifest + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(test_upsert_manifest_csv, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={ + "file_name": (open(test_upsert_manifest_csv, "rb"), "test.csv") + }, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the submission should be successful + assert response_csv.status_code == 200 + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_and_validate_filebased_manifest( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a file. 
+ + + We are validating the following: + - The submission should be successful + - The manifest table is created + """ + # GIVEN the parameters to submit a manifest + project_id = "syn23643250" + data_type = "MockFilename" + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_type": data_type, + "restrict_rules": False, + "manifest_record_type": "file_and_entities", + "asset_view": "syn23643253", + "dataset_id": "syn62822337", + "project_scope": "syn23643250", + "dataset_scope": "syn62822337", + "data_model_labels": "class_label", + "table_column_names": "class_label", + } + + valid_filename_manifest_csv = helpers.get_data_path( + "mock_manifests/ValidFilenameManifest.csv" + ) + + # WHEN a filebased manifest is validated with the filenameExists rule and uploaded + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(valid_filename_manifest_csv, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={ + "file_name": ( + open(valid_filename_manifest_csv, "rb"), + "test.csv", + ) + }, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the validation and submission should be successful + assert response_csv.status_code == 200 + self.validate_submitted_manifest_table( + syn=syn, + project_id=project_id, + data_type=data_type, + ) + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_manifest_with_hide_blanks( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a table and a file. Hide blanks. 
+ + + We are validating the following: + - The submission should be successful + - A randomized annotation should be added to the file + - The blank annotations are not present + """ + # GIVEN the parameters to submit a manifest + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_model_labels": "class_label", + "dataset_id": "syn63606804", + "manifest_record_type": "table_and_file", + "restrict_rules": "false", + "hide_blanks": "true", + "asset_view": "syn63561920", + "table_column_names": "class_label", + "annotation_keys": "class_label", + "file_annotations_upload": "true", + } + + # AND a test manifest + test_submit_manifest_with_hide_blanks_manifest = helpers.get_data_path( + "mock_manifests/TestManifestSubmission_test_submit_manifest_with_hide_blanks.csv" + ) + + # AND a randomized annotation we can verify was added + df = helpers.get_data_frame(path=test_submit_manifest_with_hide_blanks_manifest) + randomized_annotation_content = str(uuid.uuid4()) + df["RandomizedAnnotation"] = randomized_annotation_content + + with tempfile.NamedTemporaryFile(delete=True, suffix=".csv") as tmp_file: + # Write the DF to a temporary file + df.to_csv(tmp_file.name, index=False) + + # WHEN the manifest is submitted + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(tmp_file.name, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={"file_name": (open(tmp_file.name, "rb"), "test.csv")}, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the validation and submission should be successful + assert response_csv.status_code == 200 + + # AND the randomized annotation should be added to the file + modified_file = syn.get(df["entityId"][0], downloadFile=False) + assert modified_file is not None + assert modified_file["RandomizedAnnotation"][0] == randomized_annotation_content + + # AND the blank annotations are not present + assert "Genome Build" not in modified_file + assert "Genome FASTA" not in modified_file + + @pytest.mark.synapse_credentials_needed + @pytest.mark.submission + @pytest.mark.local_or_remote_api + def test_submit_manifest_with_blacklisted_characters( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + helpers: Helpers, + syn: Synapse, + testing_config: ConfigurationForTesting, + ) -> None: + """Testing submit manifest in a csv format as a table and a file. + Blacklisted characters. 
+ + + We are validating the following: + - The submission should be successful + - Annotation with blacklisted characters should not be present + - Annotation with the stripped blacklisted characters should be present + """ + # GIVEN the parameters to submit a manifest + params = { + "schema_url": DATA_MODEL_JSON_LD, + "data_model_labels": "class_label", + "dataset_id": "syn63607040", + "manifest_record_type": "table_and_file", + "restrict_rules": "false", + "hide_blanks": "true", + "asset_view": "syn63561920", + "table_column_names": "display_label", + "annotation_keys": "display_label", + "file_annotations_upload": "true", + } + + # AND a test manifest + test_submit_manifest_with_blacklisted_characters = helpers.get_data_path( + "mock_manifests/TestManifestSubmission_test_submit_manifest_with_blacklisted_characters.csv" + ) + df = helpers.get_data_frame( + path=test_submit_manifest_with_blacklisted_characters + ) + + # WHEN the manifest is submitted + url = f"{testing_config.schematic_api_server_url}/v1/model/submit" + try: + response_csv = ( + requests.post( + url, + headers=request_headers, + params=params, + files={ + "file_name": open( + test_submit_manifest_with_blacklisted_characters, "rb" + ) + }, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + query_string=params, + data={ + "file_name": ( + open( + test_submit_manifest_with_blacklisted_characters, "rb" + ), + "test.csv", + ) + }, + headers=request_headers, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN the validation and submission should be successful + assert response_csv.status_code == 200 + + # AND the randomized annotation should be added to the file + modified_file = syn.get(df["entityId"][0], downloadFile=False) + assert modified_file is not None + + # AND the blacklisted characters are not present + assert "File-Format" not in modified_file + + # AND the stripped non-blacklisted characters are present + assert "FileFormat" in modified_file diff --git a/tests/integration/test_manifest_validation.py b/tests/integration/test_manifest_validation.py new file mode 100644 index 000000000..67d4fa9d1 --- /dev/null +++ b/tests/integration/test_manifest_validation.py @@ -0,0 +1,592 @@ +""" +This module is responsible for running through the "Manifest Validation" portion of +the schematic API test plan found here: . 
+""" + +import json +from typing import Dict + +import pytest +import requests +from flask.testing import FlaskClient + +from tests.conftest import ConfigurationForTesting, Helpers +from schematic.configuration.configuration import CONFIG + +EXAMPLE_SCHEMA_URL = "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld" + + +@pytest.fixture +def request_headers(syn_token: str) -> Dict[str, str]: + """Simple bearer token header for requests""" + headers = {"Authorization": "Bearer " + syn_token} + return headers + + +class TestManifestValidation: + @pytest.mark.local_or_remote_api + @pytest.mark.parametrize( + ("input_data_type", "input_file_name"), + [ + ("Biospecimen", "mock_manifests/example_biospecimen_test.csv"), + ( + "Patient", + "mock_manifests/TestManifestValidation_test_manifest_validation_basic_valid.csv", + ), + ], + ) + def test_manifest_validation_basic_valid( + self, + input_data_type: str, + input_file_name: str, + flask_client: FlaskClient, + request_headers: Dict[str, str], + testing_config: ConfigurationForTesting, + helpers: Helpers, + ) -> None: + """ + Test that the manifest validation API returns no errors when a valid manifest is provided. + + We are validating the following: + + When we upload a valid manifest file to the /v1/model/validate endpoint... + - The response is successful (Response Code 200) + - The response content includes an 'errors' and 'warnings' key + - There are NO error values for the 'errors' key + - There are NO warnings values for the 'warnings' key + + """ + # GIVEN the manifest validation endpoint and parameters + url = f"{testing_config.schematic_api_server_url}/v1/model/validate" + params = { + "schema_url": EXAMPLE_SCHEMA_URL, + "data_type": input_data_type, + "data_model_labels": "class_label", + "restrict_rules": False, + } + + # AND a valid file + file_path = helpers.get_data_path(input_file_name) + + # WHEN we make a POST request to validate the file + response = ( + requests.post( + url, + params=params, + files={"file_name": open(file_path, "rb")}, + headers=request_headers, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(file_path, "rb")}, + ) + ) + + # THEN we expect a successful response + assert ( + response.status_code == 200 + ), f"Got status code: {response.status_code}. Expected '200'." + + # AND with expected keys in the json + response_json = ( + response.json() + if testing_config.use_deployed_schematic_api_server + else response.json + ) + assert "warnings" in response_json.keys() + assert "errors" in response_json.keys() + + # AND with no expected errors + assert len(response_json.get("errors")) == 0 + + # AND with no expected warnings + assert len(response_json.get("warnings")) == 0 + + @pytest.mark.local_or_remote_api + @pytest.mark.parametrize( + ("input_data_type", "input_file_name"), + [ + ( + "Patient", + "mock_manifests/TestManifestValidation_test_patient_manifest_invalid.csv", + ), + ], + ) + def test_manifest_validation_basic_invalid( + self, + input_data_type: str, + input_file_name: str, + flask_client: FlaskClient, + request_headers: Dict[str, str], + testing_config: ConfigurationForTesting, + helpers: Helpers, + ) -> None: + """ + Test that the manifest validation API returns errors when an invalid manifest is provided. + + We are validating the following: + + When we upload an invalid manifest file to the /v1/model/validate endpoint... 
+ - The response is successful (Response Code 200) + - The response content includes an 'errors' and 'warnings' key + - There is at least 1 error value for the 'errors' key + - The error value(s) for the 'errors' key matches up with the ``expected_errors`` list + + """ + # GIVEN the manifest validation endpoint and parameters + url = f"{testing_config.schematic_api_server_url}/v1/model/validate" + params = { + "schema_url": EXAMPLE_SCHEMA_URL, + "data_type": input_data_type, + "data_model_labels": "class_label", + "restrict_rules": False, + } + + # AND an invalid file + file = helpers.get_data_path(input_file_name) + + # WHEN we make a POST request to validate the file + response = ( + requests.post( + url, + params=params, + files={"file_name": open(file, "rb")}, + headers=request_headers, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(file, "rb")}, + ) + ) + response_json = ( + response.json() + if testing_config.use_deployed_schematic_api_server + else response.json + ) + + # THEN we expect a successful response + assert ( + response.status_code == 200 + ), f"Should be 200 status code. Got {response.status_code}" + + # AND with expected keys in the json + assert ( + "warnings" in response_json.keys() + ), f"Expected 'warnings' in response json. Got {response_json.keys()}" + assert ( + "errors" in response_json.keys() + ), f"Expected 'errors' in response json. Got {response_json.keys()}" + + # AND with the expected error + assert ( + len(response_json.get("errors")) > 0 + ), "Expected at least one error. Got none." + + # AND with the expected error message + expected_errors = [ + [ + "2", + "Family History", + "For attribute Family History in row 2 it does not appear as if you provided a comma delimited string. Please check your entry ('Random'') and try again.", + "Random", + ], + [ + "2", + "Family History", + # Truncating the rest of the message because order of the list is not guaranteed + "'Random' is not one of [", + "Random", + ], + [ + "2", + "Cancer Type", + # Truncating the rest of the message because order of the list is not guaranteed + "'Random' is not one of [", + "Random", + ], + ] + + response_errors = response_json.get("errors") + + for response_error in response_errors: + assert any( + response_error[0] == expected_error[0] + and response_error[1] == expected_error[1] + and response_error[2].startswith(expected_error[2]) + and response_error[3] == expected_error[3] + for expected_error in expected_errors + ) + if response_error[2].startswith("'Random' is not one of"): + assert "Lung" in response_error[2] + assert "Breast" in response_error[2] + assert "Prostate" in response_error[2] + assert "Colorectal" in response_error[2] + assert "Skin" in response_error[2] + + @pytest.mark.local_or_remote_api + def test_cross_manifest_validation_with_no_target( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + testing_config: ConfigurationForTesting, + helpers: Helpers, + ) -> None: + """ + Test that the manifest validation API returns warnings when cross validation is triggered + with no target provided. + + We are validating the following: + + When we upload a valid manifest file that triggers cross-manifest validation rules + to the /v1/model/validate endpoint... 
+ - The response is successful (Response Code 200) + - The response content includes a 'warnings' key + - The warning value(s) for the 'warnings' key matches up with the ``expected_warnings`` list + + """ + # GIVEN the manifest validation endpoint and parameters + url = f"{testing_config.schematic_api_server_url}/v1/model/validate" + params = { + "schema_url": EXAMPLE_SCHEMA_URL, + "data_type": "MockComponent", + "data_model_labels": "class_label", + "restrict_rules": False, + "asset_view": "syn63825013", + } + + # AND a manifest that triggers cross-manifest validation rules + input_file_name = "mock_manifests/MockComponent-cross-manifest-1.csv" + file_path = helpers.get_data_path(input_file_name) + + # AND a list of expected warnings from the POST request + expected_warnings = [ + [ + None, + "Check Recommended", + "Column Check Recommended is recommended but empty.", + None, + ], + [ + None, + "Check Match at Least", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match at Least, and validation rule: matchAtLeastOne Patient.PatientID set. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match at Least values", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match at Least values, and validation rule: matchAtLeastOne MockComponent.checkMatchatLeastvalues value. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match Exactly", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match Exactly, and validation rule: matchExactlyOne MockComponent.checkMatchExactly set. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match Exactly values", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match Exactly values, and validation rule: matchExactlyOne MockComponent.checkMatchExactlyvalues value. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match None", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match None, and validation rule: matchNone MockComponent.checkMatchNone set error. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match None values", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match None values, and validation rule: matchNone MockComponent.checkMatchNonevalues value error. 
It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + ] + + # AND we make a POST request to validate the file + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(file_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(file_path, "rb")}, + ) + ) + + # THEN we expect a successful response + assert ( + response.status_code == 200 + ), f"Should be 200 status code. Got {response.status_code}" + + # AND the response should contain the expected warnings + content = ( + response.content + if testing_config.use_deployed_schematic_api_server + else response.data + ).decode("utf-8") + data = json.loads(content) + warnings = data.get("warnings", []) + + for idx, expected_idx in zip(warnings, expected_warnings): + assert idx == expected_idx + + @pytest.mark.local_or_remote_api + def test_cross_manifest_validation_with_target( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + testing_config: ConfigurationForTesting, + helpers: Helpers, + ) -> None: + """ + Test that the manifest validation API returns warnings when a manifest target is provided. + + We are validating the following: + + When we upload a valid manifest file that triggers cross-manifest validation rules + to the /v1/model/validate endpoint and a target is provided... + - The response is successful (Response Code 200) + - The response content includes a 'warnings' key + - The warning value(s) for the 'warnings' key matches up with the ``expected_warnings`` list + + """ + # WHEN a manifest file has been uploaded to the Synapse project + # the manifest validation endpoint and parameters are given + url = f"{testing_config.schematic_api_server_url}/v1/model/validate" + params = { + "schema_url": "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld", + "data_type": "MockComponent", + "data_model_labels": "class_label", + "restrict_rules": False, + "asset_view": "syn63596704", + "project_scope": "syn63582791", + } + + # AND a list of expected warnings is given + expected_warnings = [ + [ + None, + "Check Recommended", + "Column Check Recommended is recommended but empty.", + None, + ], + [ + None, + "Check Match at Least", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match at Least, and validation rule: matchAtLeastOne Patient.PatientID set. 
It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + ] + + # AND a file to be uploaded for validation is defined + input_file = "mock_manifests/MockComponent-cross-manifest-2.csv" + input_file_path = helpers.get_data_path(input_file) + + # AND we make a POST request to validate the file + try: + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(input_file_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(input_file_path, "rb")}, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # THEN we expect a successful response + assert ( + response.status_code == 200 + ), f"Should be 200 status code. Got {response.status_code}" + + # AND the response should contain the expected warnings + content = ( + response.content + if testing_config.use_deployed_schematic_api_server + else response.data + ).decode("utf-8") + data = json.loads(content) + warnings = data.get("warnings", []) + + for idx, expected_idx in zip(warnings, expected_warnings): + assert idx == expected_idx + + @pytest.mark.local_or_remote_api + def test_manifest_validation_with_rule_combination( + self, + flask_client: FlaskClient, + request_headers: Dict[str, str], + testing_config: ConfigurationForTesting, + helpers: Helpers, + ) -> None: + """ + Test that the manifest validation API returns the expected warnings and errors when + simple rule combination validation rules are triggered. + + We are validating the following: + + When we upload a valid manifest file that triggers rule combination validation rules + to the /v1/model/validate endpoint... 
+ - The response is successful (Response Code 200) + - The response content includes an 'errors' and 'warnings' key + - The error value(s) for the 'errors' key matches up with the errors in the ``expected_contents`` list + - The warning value(s) for the 'warnings' key matches up with the warnings in the ``expected_contents`` list + + """ + # GIVEN the manifest validation endpoint and parameters + url = f"{testing_config.schematic_api_server_url}/v1/model/validate" + params = { + "schema_url": EXAMPLE_SCHEMA_URL, + "data_type": "MockComponent", + "data_model_labels": "class_label", + "restrict_rules": False, + "asset_view": "syn63622565", + } + + # AND a file to be uploaded for validation is defined + input_file = "mock_manifests/Mock_Component_rule_combination.csv" + input_file_path = helpers.get_data_path(input_file) + + # AND we make a POST request to validate the file + try: + response = ( + requests.post( + url, + headers=request_headers, + params=params, + files={"file_name": open(input_file_path, "rb")}, + timeout=300, + ) + if testing_config.use_deployed_schematic_api_server + else flask_client.post( + url, + headers=request_headers, + query_string=params, + data={"file_name": open(input_file_path, "rb")}, + ) + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") + + # AND the expected response contents is given + expected_contents = { + "errors": [ + [ + "2", + "Check Regex List", + 'For the attribute Check Regex List, on row 2, the string is not properly formatted. It should follow the following re.match pattern "[a-f]".', + ["a", "b", "c", "d", "e", "f", "g", "h"], + ], + [ + "2", + "Check Regex List", + 'For the attribute Check Regex List, on row 2, the string is not properly formatted. It should follow the following re.match pattern "[a-f]".', + ["a", "b", "c", "d", "e", "f", "g", "h"], + ], + [ + "4", + "Check Regex List Like", + 'For the attribute Check Regex List Like, on row 4, the string is not properly formatted. It should follow the following re.match pattern "[a-f]".', + ["a", "c", "h"], + ], + [ + "2", + "Check Regex List Strict", + "For attribute Check Regex List Strict in row 2 it does not appear as if you provided a comma delimited string. Please check your entry ('a'') and try again.", + "a", + ], + [ + "4", + "Check Regex List Strict", + 'For the attribute Check Regex List Strict, on row 4, the string is not properly formatted. It should follow the following re.match pattern "[a-f]".', + ["a", "b", "h"], + ], + ["2", "Check NA", "'' should be non-empty", ""], + ], + "warnings": [ + [ + None, + "Check Recommended", + "Column Check Recommended is recommended but empty.", + None, + ], + [ + None, + "Check Match at Least", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match at Least, and validation rule: matchAtLeastOne Patient.PatientID set. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match at Least values", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match at Least values, and validation rule: matchAtLeastOne MockComponent.checkMatchatLeastvalues value. 
It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match Exactly", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match Exactly, and validation rule: matchExactlyOne MockComponent.checkMatchExactly set. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match Exactly values", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match Exactly values, and validation rule: matchExactlyOne MockComponent.checkMatchExactlyvalues value. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match None", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match None, and validation rule: matchNone MockComponent.checkMatchNone set error. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + [ + None, + "Check Match None values", + "Cross Manifest Validation Warning: There are no target columns to validate this manifest against for attribute: Check Match None values, and validation rule: matchNone MockComponent.checkMatchNonevalues value error. It is assumed this is the first manifest in a series to be submitted, so validation will pass, for now, and will run again when there are manifests uploaded to validate against.", + None, + ], + ], + } + + # THEN we expect a successful response + assert ( + response.status_code == 200 + ), f"Should be 200 status code. Got {response.status_code}" + + # AND the response should match the expected response + content = ( + response.content + if testing_config.use_deployed_schematic_api_server + else response.data + ).decode("utf-8") + content_dict = json.loads(content) + assert content_dict == expected_contents diff --git a/tests/integration/test_metadata_model.py b/tests/integration/test_metadata_model.py index 6aa15bf18..2178a83b8 100644 --- a/tests/integration/test_metadata_model.py +++ b/tests/integration/test_metadata_model.py @@ -8,60 +8,57 @@ of the `SynapseStorage` class, which is responsible for handling file uploads and annotations in Synapse. """ - +import asyncio import logging -import pytest import tempfile - +import uuid from contextlib import nullcontext as does_not_raise +from typing import Callable, Optional +import pandas as pd +import pytest from pytest_mock import MockerFixture +from synapseclient import Annotations +from synapseclient.core import utils +from synapseclient.models import File, Folder + from schematic.store.synapse import SynapseStorage -from tests.conftest import metadata_model +from schematic.utils.general import create_temp_folder +from tests.conftest import Helpers, metadata_model +from tests.utils import CleanupItem logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) +DESCRIPTION = "This is an example file." 
+CONTENT_TYPE = "text/plain" +VERSION_COMMENT = "My version comment" + + +def file_instance() -> File: + """Creates a file instance with random content, used for manifests to be able to + point to real Synapse entities during each test run. The parent folder these are + created in is cleaned up post test run.""" + filename = utils.make_bogus_uuid_file() + return File( + path=filename, + description=DESCRIPTION, + content_type=CONTENT_TYPE, + version_comment=VERSION_COMMENT, + version_label=str(uuid.uuid4()), + ) + class TestMetadataModel: - # Define the test cases as a class attribute - test_cases = [ - # Test 1: Check that a valid manifest can be submitted, and corresponding entities annotated from it - ( - "mock_manifests/filepath_submission_test_manifest.csv", - "syn62276880", - None, - "syn62280543", - "syn53011753", - None, - ), - # Test 2: Change the Sample ID annotation from the previous test to ensure the manifest file is getting updated - ( - "mock_manifests/filepath_submission_test_manifest_sampleidx10.csv", - "syn62276880", - None, - "syn62280543", - "syn53011753", - None, - ), - # Test 3: Test manifest file upload with validation based on the MockFilename component and given dataset_scope - ( - "mock_manifests/ValidFilenameManifest.csv", - "syn62822337", - "MockFilename", - "syn62822975", - "syn63192751", - "syn62822337", - ), - ] + """Test suite for verifying the submission and annotation of file-based manifests.""" def validate_manifest_annotations( self, - manifest_annotations, - manifest_entity_type, - expected_entity_id, - manifest_file_contents=None, - ): + manifest_annotations: Annotations, + manifest_entity_type: str, + expected_entity_id: str, + manifest_file_contents: pd.DataFrame = None, + ) -> None: """ Validates that the annotations on a manifest entity (file or table) were correctly updated by comparing the annotations on the manifest entity with the contents of the manifest file itself, @@ -96,88 +93,381 @@ def validate_manifest_annotations( == manifest_file_contents[annotation].unique() ) - @pytest.mark.parametrize( - "manifest_path, dataset_id, validate_component, expected_manifest_id, " - "expected_table_id, dataset_scope", - test_cases, - ) - def test_submit_filebased_manifest_file_and_entities( + @pytest.mark.single_process_execution + async def test_submit_filebased_manifest_file_and_entities_valid_manifest_submitted( self, - helpers, - manifest_path, - dataset_id, - validate_component, - expected_manifest_id, - expected_table_id, - dataset_scope, + helpers: Helpers, mocker: MockerFixture, - synapse_store, + synapse_store: SynapseStorage, + schedule_for_cleanup: Callable[[CleanupItem], None], ): - self._submit_and_verify_manifest( - helpers=helpers, - mocker=mocker, - synapse_store=synapse_store, - manifest_path=manifest_path, - dataset_id=dataset_id, - expected_manifest_id=expected_manifest_id, - expected_table_id=expected_table_id, - manifest_record_type="file_and_entities", - validate_component=validate_component, - dataset_scope=dataset_scope, - ) + # GIVEN a project that exists in Synapse + project_id = "syn23643250" - @pytest.mark.parametrize( - "manifest_path, dataset_id, validate_component, expected_manifest_id, " - "expected_table_id, dataset_scope", - test_cases, - ) - def test_submit_filebased_manifest_table_and_file( + # AND a dataset/files that exist in Synapse + dataset_folder = await Folder( + name=f"test_submit_filebased_manifest_file_and_entities_valid_manifest_submitted_{uuid.uuid4()}", + files=[file_instance(), file_instance()], + 
parent_id=project_id,
+        ).store_async(synapse_client=synapse_store.syn)
+        schedule_for_cleanup(CleanupItem(synapse_id=dataset_folder.id))
+        # Wait for the fileview to be updated
+        await asyncio.sleep(10)
+
+        # AND a CSV file on disk
+        filenames = [
+            f"schematic - main/{dataset_folder.name}/{file.name}"
+            for file in dataset_folder.files
+        ]
+        entity_ids = [file.id for file in dataset_folder.files]
+        random_uuids = [str(uuid.uuid4()) for _ in range(len(filenames))]
+        data = {
+            "Filename": filenames,
+            "Sample ID": random_uuids,
+            "File Format": ["" for _ in range(len(filenames))],
+            "Component": ["BulkRNA-seqAssay" for _ in range(len(filenames))],
+            "Genome Build": ["" for _ in range(len(filenames))],
+            "Genome FASTA": ["" for _ in range(len(filenames))],
+            "Id": random_uuids,
+            "entityId": entity_ids,
+        }
+        df = pd.DataFrame(data)
+
+        with tempfile.NamedTemporaryFile(
+            delete=True,
+            suffix=".csv",
+            dir=create_temp_folder(path=tempfile.gettempdir()),
+        ) as tmp_file:
+            df.to_csv(tmp_file.name, index=False)
+
+            # WHEN the manifest is submitted (Assertions are handled in the helper method)
+            self._submit_and_verify_manifest(
+                helpers=helpers,
+                mocker=mocker,
+                synapse_store=synapse_store,
+                manifest_path=tmp_file.name,
+                dataset_id=dataset_folder.id,
+                manifest_record_type="file_and_entities",
+                validate_component=None,
+                dataset_scope=None,
+                expected_table_id=None,
+                expected_table_name="bulkrna-seqassay_synapse_storage_manifest_table",
+                project_id=project_id,
+                expected_manifest_id=None,
+                expected_manifest_name="synapse_storage_manifest_bulkrna-seqassay.csv",
+            )
+
+        # AND when the annotations are updated and the manifest is resubmitted
+        with tempfile.NamedTemporaryFile(
+            delete=True,
+            suffix=".csv",
+            dir=create_temp_folder(path=tempfile.gettempdir()),
+        ) as tmp_file:
+            random_uuids = [str(uuid.uuid4()) for _ in range(len(filenames))]
+            df["Sample ID"] = random_uuids
+            df["Id"] = random_uuids
+            df.to_csv(tmp_file.name, index=False)
+
+            # THEN the annotations are updated
+            self._submit_and_verify_manifest(
+                helpers=helpers,
+                mocker=mocker,
+                synapse_store=synapse_store,
+                manifest_path=tmp_file.name,
+                dataset_id=dataset_folder.id,
+                manifest_record_type="file_and_entities",
+                validate_component=None,
+                dataset_scope=None,
+                expected_table_id=None,
+                expected_table_name="bulkrna-seqassay_synapse_storage_manifest_table",
+                project_id=project_id,
+                expected_manifest_id=None,
+                expected_manifest_name="synapse_storage_manifest_bulkrna-seqassay.csv",
+                already_spied=True,
+            )
+
+    @pytest.mark.single_process_execution
+    async def test_submit_filebased_manifest_file_and_entities_mock_filename(
+        self,
+        helpers: Helpers,
+        mocker: MockerFixture,
+        synapse_store: SynapseStorage,
+        schedule_for_cleanup: Callable[[CleanupItem], None],
+    ):
+        # GIVEN a project that exists in Synapse
+        project_id = "syn23643250"
+
+        # AND a dataset/files that exist in Synapse
+        dataset_folder = await Folder(
+            name=f"test_submit_filebased_manifest_file_and_entities_mock_filename_{uuid.uuid4()}",
+            
files=[file_instance(), file_instance()], + parent_id=project_id, + ).store_async(synapse_client=synapse_store.syn) + schedule_for_cleanup(CleanupItem(synapse_id=dataset_folder.id)) + # Wait for the fileview to be updated + await asyncio.sleep(10) + + # AND a CSV file on disk + filenames = [ + f"schematic - main/{dataset_folder.name}/{file.name}" + for file in dataset_folder.files + ] + entity_ids = [file.id for file in dataset_folder.files] + random_uuids = [str(uuid.uuid4()) for _ in range(len(filenames))] + data = { + "Filename": filenames, + "Sample ID": random_uuids, + "Id": random_uuids, + "Component": ["MockFilename" for _ in range(len(filenames))], + "entityId": entity_ids, + } + df = pd.DataFrame(data) + + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), + ) as tmp_file: + df.to_csv(tmp_file.name, index=False) + + # WHEN the manifest is submitted (Assertions are handled in the helper method) + self._submit_and_verify_manifest( + helpers=helpers, + mocker=mocker, + synapse_store=synapse_store, + manifest_path=tmp_file.name, + dataset_id=dataset_folder.id, + manifest_record_type="file_and_entities", + validate_component="MockFilename", + dataset_scope=dataset_folder.id, + expected_table_id=None, + expected_table_name="mockfilename_synapse_storage_manifest_table", + project_id=project_id, + expected_manifest_id=None, + expected_manifest_name="synapse_storage_manifest_mockfilename.csv", + ) + + @pytest.mark.single_process_execution + async def test_submit_filebased_manifest_table_and_file_valid_manifest_submitted( + self, + helpers: Helpers, + mocker: MockerFixture, + synapse_store: SynapseStorage, + schedule_for_cleanup: Callable[[CleanupItem], None], + ) -> None: + # GIVEN a project that exists in Synapse + project_id = "syn23643250" + + # AND a dataset/files that exist in Synapse + dataset_folder = await Folder( + name=f"test_submit_filebased_manifest_table_and_file_valid_manifest_submitted_{uuid.uuid4()}", + files=[file_instance(), file_instance()], + parent_id=project_id, + ).store_async(synapse_client=synapse_store.syn) + schedule_for_cleanup(CleanupItem(synapse_id=dataset_folder.id)) + # Wait for the fileview to be updated + await asyncio.sleep(10) + + # AND a CSV file on disk + filenames = [ + f"schematic - main/{dataset_folder.name}/{file.name}" + for file in dataset_folder.files + ] + entity_ids = [file.id for file in dataset_folder.files] + random_uuids = [str(uuid.uuid4()) for _ in range(len(filenames))] + data = { + "Filename": filenames, + "Sample ID": random_uuids, + "File Format": ["" for _ in range(len(filenames))], + "Component": ["BulkRNA-seqAssay" for _ in range(len(filenames))], + "Genome Build": ["" for _ in range(len(filenames))], + "Genome FASTA": ["" for _ in range(len(filenames))], + "Id": random_uuids, + "entityId": entity_ids, + } + df = pd.DataFrame(data) + + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), + ) as tmp_file: + df.to_csv(tmp_file.name, index=False) + + # WHEN the manifest is submitted (Assertions are handled in the helper method) + self._submit_and_verify_manifest( + helpers=helpers, + mocker=mocker, + synapse_store=synapse_store, + manifest_path=tmp_file.name, + dataset_id=dataset_folder.id, + manifest_record_type="table_and_file", + validate_component=None, + dataset_scope=None, + # Find by name instead of ID + expected_table_id=None, + 
expected_table_name="bulkrna-seqassay_synapse_storage_manifest_table", + project_id=project_id, + # Find by name instead of ID + expected_manifest_id=None, + expected_manifest_name="synapse_storage_manifest_bulkrna-seqassay.csv", + ) + + # AND when the annotations are updated and the manifest is resubmitted + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), + ) as tmp_file: + random_uuids = [str(uuid.uuid4()) for _ in range(len(filenames))] + df["Sample ID"] = random_uuids + df["Id"] = random_uuids + df.to_csv(tmp_file.name, index=False) + + # THEN the annotations are updated + self._submit_and_verify_manifest( + helpers=helpers, + mocker=mocker, + synapse_store=synapse_store, + manifest_path=tmp_file.name, + dataset_id=dataset_folder.id, + manifest_record_type="table_and_file", + validate_component=None, + dataset_scope=None, + # Find by name instead of ID + expected_table_id=None, + expected_table_name="bulkrna-seqassay_synapse_storage_manifest_table", + project_id=project_id, + # Find by name instead of ID + expected_manifest_id=None, + expected_manifest_name="synapse_storage_manifest_bulkrna-seqassay.csv", + already_spied=True, + ) + + @pytest.mark.single_process_execution + async def test_submit_filebased_manifest_table_and_file_mock_filename( + self, + helpers: Helpers, + mocker: MockerFixture, + synapse_store: SynapseStorage, + schedule_for_cleanup: Callable[[CleanupItem], None], + ) -> None: + # GIVEN a project that exists in Synapse + project_id = "syn23643250" + + # AND a dataset/files that exist in Synapse + dataset_folder = await Folder( + name=f"test_submit_filebased_manifest_table_and_file_mock_filename_{uuid.uuid4()}", + files=[file_instance(), file_instance()], + parent_id=project_id, + ).store_async(synapse_client=synapse_store.syn) + schedule_for_cleanup(CleanupItem(synapse_id=dataset_folder.id)) + # Wait for the fileview to be updated + await asyncio.sleep(10) + + # AND a CSV file on disk + filenames = [ + f"schematic - main/{dataset_folder.name}/{file.name}" + for file in dataset_folder.files + ] + entity_ids = [file.id for file in dataset_folder.files] + random_uuids = [str(uuid.uuid4()) for _ in range(len(filenames))] + data = { + "Filename": filenames, + "Sample ID": random_uuids, + "Id": random_uuids, + "Component": ["MockFilename" for _ in range(len(filenames))], + "entityId": entity_ids, + } + df = pd.DataFrame(data) + + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), + ) as tmp_file: + df.to_csv(tmp_file.name, index=False) + + # WHEN the manifest is submitted (Assertions are handled in the helper method) + self._submit_and_verify_manifest( + helpers=helpers, + mocker=mocker, + synapse_store=synapse_store, + manifest_path=tmp_file.name, + dataset_id=dataset_folder.id, + manifest_record_type="table_and_file", + validate_component="MockFilename", + dataset_scope=dataset_folder.id, + # Find by name instead of ID + expected_table_id=None, + expected_table_name="mockfilename_synapse_storage_manifest_table", + project_id=project_id, + # Find by name instead of ID + expected_manifest_id=None, + expected_manifest_name="synapse_storage_manifest_mockfilename.csv", + ) def _submit_and_verify_manifest( self, helpers, mocker, - synapse_store, - manifest_path, - dataset_id, - expected_manifest_id, - expected_table_id, - manifest_record_type, - validate_component=None, - dataset_scope=None, - ): + synapse_store: SynapseStorage, + 
manifest_path: str, + dataset_id: str, + manifest_record_type: str, + project_id: Optional[str] = None, + expected_table_id: Optional[str] = None, + expected_table_name: Optional[str] = None, + expected_manifest_id: Optional[str] = None, + expected_manifest_name: Optional[str] = None, + validate_component: Optional[str] = None, + dataset_scope: Optional[str] = None, + already_spied: bool = False, + ) -> None: + """Handles submission and verification of file-based manifests. + + Args: + helpers: Test helper functions + mocker: Pytest mocker fixture + synapse_store: Synapse storage object + manifest_path: Path to the manifest file + dataset_id: Synapse ID of the dataset + manifest_record_type: Type of manifest record + project_id: Synapse ID of the project (Required if using `expected_table_name`) + expected_table_id: Synapse ID of the expected table (Alternative to `expected_table_name`) + expected_table_name: Name of the expected table (Alternative to `expected_table_id`) + expected_manifest_id: Synapse ID of the expected manifest (Alternative to `expected_manifest_name`) + expected_manifest_name: Name of the expected manifest (Alternative to `expected_manifest_id`) + validate_component: Component to validate + dataset_scope: Dataset scope + already_spied: Whether the methods have already been spied + """ + if not (expected_table_id or (expected_table_name and project_id)): + raise ValueError( + "expected_table_id or (expected_table_name + project_id) must be provided" + ) + if not (expected_manifest_id or expected_manifest_name): + raise ValueError( + "expected_manifest_id or expected_manifest_name must be provided" + ) + # Spies - spy_upload_file_as_csv = mocker.spy(SynapseStorage, "upload_manifest_as_csv") - spy_upload_file_as_table = mocker.spy( - SynapseStorage, "upload_manifest_as_table" - ) - spy_upload_file_combo = mocker.spy(SynapseStorage, "upload_manifest_combo") - spy_add_annotations = mocker.spy( - SynapseStorage, "add_annotations_to_entities_files" - ) + if already_spied: + spy_upload_file_as_csv = SynapseStorage.upload_manifest_as_csv + spy_upload_file_as_table = SynapseStorage.upload_manifest_as_table + spy_upload_file_combo = SynapseStorage.upload_manifest_combo + spy_add_annotations = SynapseStorage.add_annotations_to_entities_files + else: + spy_upload_file_as_csv = mocker.spy( + SynapseStorage, "upload_manifest_as_csv" + ) + spy_upload_file_as_table = mocker.spy( + SynapseStorage, "upload_manifest_as_table" + ) + spy_upload_file_combo = mocker.spy(SynapseStorage, "upload_manifest_combo") + spy_add_annotations = mocker.spy( + SynapseStorage, "add_annotations_to_entities_files" + ) # GIVEN a metadata model object using class labels meta_data_model = metadata_model(helpers, "class_label") @@ -205,10 +495,13 @@ def _submit_and_verify_manifest( ) # AND the files should be annotated - spy_add_annotations.assert_called_once() + if already_spied: + assert spy_add_annotations.call_count == 2 + else: + assert spy_add_annotations.call_count == 1 # AND the annotations on the entities should have the correct metadata - for index, row in manifest.iterrows(): + for _, row in manifest.iterrows(): entityId = row["entityId"] expected_sample_id = row["Sample ID"] annos = synapse_store.syn.get_annotations(entityId) @@ -216,6 +509,10 @@ def _submit_and_verify_manifest( assert str(sample_id) == str(expected_sample_id) # AND the annotations on the manifest file itself are correct + expected_manifest_id = expected_manifest_id or synapse_store.syn.findEntityId( + name=expected_manifest_name, +
parent=dataset_id, + ) manifest_file_annotations = synapse_store.syn.get_annotations( expected_manifest_id ) @@ -228,6 +525,10 @@ def _submit_and_verify_manifest( if manifest_record_type == "table_and_file": with tempfile.TemporaryDirectory() as download_dir: + expected_table_id = expected_table_id or synapse_store.syn.findEntityId( + name=expected_table_name, + parent=project_id, + ) manifest_table = synapse_store.syn.tableQuery( f"select * from {expected_table_id}", downloadLocation=download_dir ).asDataFrame() @@ -252,10 +553,16 @@ def _submit_and_verify_manifest( # AND the correct upload methods were called for the given record type if manifest_record_type == "file_and_entities": - spy_upload_file_as_csv.assert_called_once() + if already_spied: + assert spy_upload_file_as_csv.call_count == 2 + else: + assert spy_upload_file_as_csv.call_count == 1 spy_upload_file_as_table.assert_not_called() spy_upload_file_combo.assert_not_called() elif manifest_record_type == "table_and_file": - spy_upload_file_as_table.assert_called_once() + if already_spied: + assert spy_upload_file_as_table.call_count == 2 + else: + assert spy_upload_file_as_table.call_count == 1 spy_upload_file_as_csv.assert_not_called() spy_upload_file_combo.assert_not_called() diff --git a/tests/integration/test_store_synapse.py b/tests/integration/test_store_synapse.py index 0bf23c1b1..085a48fc3 100644 --- a/tests/integration/test_store_synapse.py +++ b/tests/integration/test_store_synapse.py @@ -6,7 +6,6 @@ from schematic.schemas.data_model_graph import DataModelGraphExplorer from schematic.store.synapse import SynapseStorage from schematic.utils.validate_utils import comma_separated_list_regex -from tests.conftest import Helpers class TestStoreSynapse: diff --git a/tests/integration/test_submit_manifest.py b/tests/integration/test_submit_manifest.py deleted file mode 100644 index cc14de487..000000000 --- a/tests/integration/test_submit_manifest.py +++ /dev/null @@ -1,112 +0,0 @@ -import io -import logging -import uuid -from typing import Dict, Generator - -import flask -import pytest -from flask.testing import FlaskClient - -from schematic.store.synapse import SynapseStorage -from schematic_api.api import create_app -from tests.conftest import Helpers - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -DATA_MODEL_JSON_LD = "https://raw.githubusercontent.com/Sage-Bionetworks/schematic/develop/tests/data/example.model.jsonld" - - -@pytest.fixture(scope="class") -def app() -> flask.Flask: - app = create_app() - return app - - -@pytest.fixture(scope="class") -def client(app: flask.Flask) -> Generator[FlaskClient, None, None]: - app.config["SCHEMATIC_CONFIG"] = None - - with app.test_client() as client: - yield client - - -@pytest.fixture -def request_headers(syn_token: str) -> Dict[str, str]: - headers = {"Authorization": "Bearer " + syn_token} - return headers - - -@pytest.mark.schematic_api -class TestManifestSubmission: - @pytest.mark.synapse_credentials_needed - @pytest.mark.submission - def test_submit_nested_manifest_table_and_file_replace( - self, - client: FlaskClient, - request_headers: Dict[str, str], - helpers: Helpers, - synapse_store: SynapseStorage, - ) -> None: - # GIVEN the parameters to submit a manifest - params = { - "schema_url": DATA_MODEL_JSON_LD, - "data_type": "BulkRNA-seqAssay", - "restrict_rules": False, - "manifest_record_type": "table_and_file", - "asset_view": "syn63646213", - "dataset_id": "syn63646197", - "table_manipulation": "replace", - "data_model_labels": "class_label",
"table_column_names": "display_name", - } - - # AND a test manifest with a nested file entity - nested_manifest_replace_csv = helpers.get_data_path( - "mock_manifests/TestManifestOperation_test_submit_nested_manifest_table_and_file_replace.csv" - ) - - # AND a randomized annotation we can verify was added - df = helpers.get_data_frame(path=nested_manifest_replace_csv) - randomized_annotation_content = str(uuid.uuid4()) - df["RandomizedAnnotation"] = randomized_annotation_content - csv_file = io.BytesIO() - df.to_csv(csv_file, index=False) - csv_file.seek(0) # Rewind the buffer to the beginning - - # WHEN I submit that manifest - response_csv = client.post( - "http://localhost:3001/v1/model/submit", - query_string=params, - data={"file_name": (csv_file, "test.csv")}, - headers=request_headers, - ) - - # THEN the submission should be successful - assert response_csv.status_code == 200 - - # AND the file should be uploaded to Synapse with the new annotation - modified_file = synapse_store.syn.get(df["entityId"][0], downloadFile=False) - assert modified_file is not None - assert modified_file["RandomizedAnnotation"][0] == randomized_annotation_content - - # AND the manifest should exist in the dataset folder - manifest_synapse_id = synapse_store.syn.findEntityId( - name="synapse_storage_manifest_bulkrna-seqassay.csv", parent="syn63646197" - ) - assert manifest_synapse_id is not None - synapse_manifest_entity = synapse_store.syn.get( - entity=manifest_synapse_id, downloadFile=False - ) - assert synapse_manifest_entity is not None - assert ( - synapse_manifest_entity["_file_handle"]["fileName"] - == "synapse_storage_manifest_bulkrna-seqassay.csv" - ) - - # AND the manifest table is created - expected_table_name = "bulkrna-seqassay_synapse_storage_manifest_table" - synapse_id = synapse_store.syn.findEntityId( - parent="syn23643250", name=expected_table_name - ) - assert synapse_id is not None diff --git a/tests/test_api.py b/tests/test_api.py index 0a27b5c73..1f2d79add 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -2,19 +2,23 @@ import logging import os import re +import uuid from math import ceil from time import perf_counter from typing import Dict, Generator, List, Tuple, Union +from unittest.mock import patch import flask import pandas as pd # third party library import import pytest from flask.testing import FlaskClient +from opentelemetry import trace from schematic.configuration.configuration import Configuration from schematic.schemas.data_model_graph import DataModelGraph, DataModelGraphExplorer from schematic.schemas.data_model_parser import DataModelParser -from schematic_api.api import create_app +from schematic.utils.general import create_temp_folder +from schematic.configuration.configuration import CONFIG logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) @@ -24,26 +28,20 @@ @pytest.fixture(scope="class") -def app() -> flask.Flask: - app = create_app() - return app +def client(flask_app: flask.Flask) -> Generator[FlaskClient, None, None]: + flask_app.config["SCHEMATIC_CONFIG"] = None - -@pytest.fixture(scope="class") -def client(app: flask.Flask) -> Generator[FlaskClient, None, None]: - app.config["SCHEMATIC_CONFIG"] = None - - with app.test_client() as client: + with flask_app.test_client() as client: yield client -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def valid_test_manifest_csv(helpers) -> str: test_manifest_path = helpers.get_data_path("mock_manifests/Valid_Test_Manifest.csv") return test_manifest_path 
-@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def valid_filename_manifest_csv(helpers) -> str: test_manifest_path = helpers.get_data_path( "mock_manifests/ValidFilenameManifest.csv" @@ -51,7 +49,7 @@ def valid_filename_manifest_csv(helpers) -> str: return test_manifest_path -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def invalid_filename_manifest_csv(helpers) -> str: test_manifest_path = helpers.get_data_path( "mock_manifests/InvalidFilenameManifest.csv" @@ -59,7 +57,7 @@ def invalid_filename_manifest_csv(helpers) -> str: return test_manifest_path -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def test_manifest_submit(helpers) -> str: test_manifest_path = helpers.get_data_path( "mock_manifests/example_biospecimen_test.csv" @@ -67,7 +65,7 @@ def test_manifest_submit(helpers) -> str: return test_manifest_path -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def test_invalid_manifest(helpers) -> pd.DataFrame: test_invalid_manifest = helpers.get_data_frame( "mock_manifests/Invalid_Test_Manifest.csv", preserve_raw_input=False @@ -75,7 +73,7 @@ def test_invalid_manifest(helpers) -> pd.DataFrame: return test_invalid_manifest -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def test_upsert_manifest_csv(helpers) -> str: test_upsert_manifest_path = helpers.get_data_path( "mock_manifests/rdb_table_manifest.csv" @@ -83,7 +81,7 @@ def test_upsert_manifest_csv(helpers) -> str: return test_upsert_manifest_path -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def test_manifest_json(helpers) -> str: test_manifest_path = helpers.get_data_path( "mock_manifests/Example.Patient.manifest.json" @@ -120,15 +118,40 @@ def get_MockComponent_attribute() -> Generator[str, None, None]: yield MockComponent_attribute +def get_traceparent() -> str: + """Create and format the `traceparent` to be used in the header of the request. This + is used by opentelemetry to attach the context that was started outside of the + flask server to the request.
The purpose is so that we can propagate the trace + context across services.""" + current_span = trace.get_current_span() + span_context = current_span.get_span_context() + trace_id = format(span_context.trace_id, "032x") + span_id = format(span_context.span_id, "016x") + trace_flags = format(span_context.trace_flags, "02x") + + traceparent = f"00-{trace_id}-{span_id}-{trace_flags}" + + return traceparent + + @pytest.fixture def request_headers(syn_token: str) -> Dict[str, str]: - headers = {"Authorization": "Bearer " + syn_token} + headers = {"Authorization": "Bearer " + syn_token, "traceparent": get_traceparent()} + return headers + + +@pytest.fixture +def request_headers_trace() -> Dict[str, str]: + headers = {"traceparent": get_traceparent()} return headers @pytest.fixture def request_invalid_headers() -> Dict[str, str]: - headers = {"Authorization": "Bearer invalid headers"} + headers = { + "Authorization": "Bearer invalid headers", + "traceparent": get_traceparent(), + } return headers @@ -183,6 +206,7 @@ def test_get_storage_assets_tables( else: pass + @pytest.mark.slow_test @pytest.mark.synapse_credentials_needed @pytest.mark.parametrize("full_path", [True, False]) @pytest.mark.parametrize("file_names", [None, "Sample_A.txt"]) @@ -338,7 +362,9 @@ def test_if_in_assetview( @pytest.mark.schematic_api class TestMetadataModelOperation: @pytest.mark.parametrize("as_graph", [True, False]) - def test_component_requirement(self, client: FlaskClient, as_graph: bool) -> None: + def test_component_requirement( + self, client: FlaskClient, as_graph: bool, request_headers_trace: Dict[str, str] + ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, "source_component": "BulkRNA-seqAssay", @@ -346,7 +372,9 @@ def test_component_requirement(self, client: FlaskClient, as_graph: bool) -> Non } response = client.get( - "http://localhost:3001/v1/model/component-requirements", query_string=params + "http://localhost:3001/v1/model/component-requirements", + query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @@ -366,7 +394,10 @@ def test_component_requirement(self, client: FlaskClient, as_graph: bool) -> Non class TestUtilsOperation: @pytest.mark.parametrize("strict_camel_case", [True, False]) def test_get_property_label_from_display_name( - self, client: FlaskClient, strict_camel_case: bool + self, + client: FlaskClient, + strict_camel_case: bool, + request_headers_trace: Dict[str, str], ) -> None: params = { "display_name": "mocular entity", @@ -376,6 +407,7 @@ def test_get_property_label_from_display_name( response = client.get( "http://localhost:3001/v1/utils/get_property_label_from_display_name", query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @@ -389,10 +421,14 @@ def test_get_property_label_from_display_name( @pytest.mark.schematic_api class TestDataModelGraphExplorerOperation: - def test_get_schema(self, client: FlaskClient) -> None: + def test_get_schema( + self, client: FlaskClient, request_headers_trace: Dict[str, str] + ) -> None: params = {"schema_url": DATA_MODEL_JSON_LD, "data_model_labels": "class_label"} response = client.get( - "http://localhost:3001/v1/schemas/get/schema", query_string=params + "http://localhost:3001/v1/schemas/get/schema", + query_string=params, + headers=request_headers_trace, ) response_dt = response.data @@ -403,7 +439,9 @@ def test_get_schema(self, client: FlaskClient) -> None: if os.path.exists(response_dt): os.remove(response_dt) - def test_if_node_required(test, client: 
FlaskClient) -> None: + def test_if_node_required( + test, client: FlaskClient, request_headers_trace: Dict[str, str] + ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, "node_display_name": "FamilyHistory", @@ -411,13 +449,17 @@ def test_if_node_required(test, client: FlaskClient) -> None: } response = client.get( - "http://localhost:3001/v1/schemas/is_node_required", query_string=params + "http://localhost:3001/v1/schemas/is_node_required", + query_string=params, + headers=request_headers_trace, ) response_dta = json.loads(response.data) assert response.status_code == 200 assert response_dta == True - def test_get_node_validation_rules(test, client: FlaskClient) -> None: + def test_get_node_validation_rules( + test, client: FlaskClient, request_headers_trace: Dict[str, str] + ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, "node_display_name": "CheckRegexList", @@ -425,13 +467,16 @@ def test_get_node_validation_rules(test, client: FlaskClient) -> None: response = client.get( "http://localhost:3001/v1/schemas/get_node_validation_rules", query_string=params, + headers=request_headers_trace, ) response_dta = json.loads(response.data) assert response.status_code == 200 assert "list" in response_dta assert "regex match [a-f]" in response_dta - def test_get_nodes_display_names(test, client: FlaskClient) -> None: + def test_get_nodes_display_names( + test, client: FlaskClient, request_headers_trace: Dict[str, str] + ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, "node_list": ["FamilyHistory", "Biospecimen"], @@ -439,6 +484,7 @@ def test_get_nodes_display_names(test, client: FlaskClient) -> None: response = client.get( "http://localhost:3001/v1/schemas/get_nodes_display_names", query_string=params, + headers=request_headers_trace, ) response_dta = json.loads(response.data) assert response.status_code == 200 @@ -447,19 +493,29 @@ def test_get_nodes_display_names(test, client: FlaskClient) -> None: @pytest.mark.parametrize( "relationship", ["parentOf", "requiresDependency", "rangeValue", "domainValue"] ) - def test_get_subgraph_by_edge(self, client: FlaskClient, relationship: str) -> None: + def test_get_subgraph_by_edge( + self, + client: FlaskClient, + relationship: str, + request_headers_trace: Dict[str, str], + ) -> None: params = {"schema_url": DATA_MODEL_JSON_LD, "relationship": relationship} response = client.get( "http://localhost:3001/v1/schemas/get/graph_by_edge_type", query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @pytest.mark.parametrize("return_display_names", [True, False]) @pytest.mark.parametrize("node_label", ["FamilyHistory", "TissueStatus"]) def test_get_node_range( - self, client: FlaskClient, return_display_names: bool, node_label: str + self, + client: FlaskClient, + return_display_names: bool, + node_label: str, + request_headers_trace: Dict[str, str], ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, @@ -468,7 +524,9 @@ def test_get_node_range( } response = client.get( - "http://localhost:3001/v1/schemas/get_node_range", query_string=params + "http://localhost:3001/v1/schemas/get_node_range", + query_string=params, + headers=request_headers_trace, ) response_dt = json.loads(response.data) assert response.status_code == 200 @@ -490,6 +548,7 @@ def test_node_dependencies( source_node: str, return_display_names: Union[bool, None], return_schema_ordered: Union[bool, None], + request_headers_trace: Dict[str, str], ) -> None: return_display_names = True return_schema_ordered = False @@ -504,6 +563,7 @@ def 
test_node_dependencies( response = client.get( "http://localhost:3001/v1/schemas/get_node_dependencies", query_string=params, + headers=request_headers_trace, ) response_dt = json.loads(response.data) assert response.status_code == 200 @@ -748,7 +808,11 @@ def test_generate_new_manifest( ], ) def test_generate_manifest_file_based_annotations( - self, client: FlaskClient, use_annotations: bool, expected: list[str] + self, + client: FlaskClient, + use_annotations: bool, + expected: list[str], + request_headers_trace: Dict[str, str], ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, @@ -759,9 +823,16 @@ def test_generate_manifest_file_based_annotations( "use_annotations": use_annotations, } - response = client.get( - "http://localhost:3001/v1/manifest/generate", query_string=params - ) + try: + response = client.get( + "http://localhost:3001/v1/manifest/generate", + query_string=params, + headers=request_headers_trace, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response.status_code == 200 response_google_sheet = json.loads(response.data) @@ -798,7 +869,7 @@ def test_generate_manifest_file_based_annotations( # test case: generate a manifest with annotations when use_annotations is set to True for a component that is not file-based # the dataset folder does not contain an existing manifest def test_generate_manifest_not_file_based_with_annotations( - self, client: FlaskClient + self, client: FlaskClient, request_headers_trace: Dict[str, str] ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, @@ -808,9 +879,16 @@ def test_generate_manifest_not_file_based_with_annotations( "output_format": "google_sheet", "use_annotations": False, } - response = client.get( - "http://localhost:3001/v1/manifest/generate", query_string=params - ) + try: + response = client.get( + "http://localhost:3001/v1/manifest/generate", + query_string=params, + headers=request_headers_trace, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response.status_code == 200 response_google_sheet = json.loads(response.data) @@ -833,21 +911,28 @@ def test_generate_manifest_not_file_based_with_annotations( ] ) - def test_generate_manifest_data_type_not_found(self, client: FlaskClient) -> None: + def test_generate_manifest_data_type_not_found( + self, client: FlaskClient, request_headers_trace: Dict[str, str] + ) -> None: params = { "schema_url": DATA_MODEL_JSON_LD, "data_type": "wrong data type", "use_annotations": False, } response = client.get( - "http://localhost:3001/v1/manifest/generate", query_string=params + "http://localhost:3001/v1/manifest/generate", + query_string=params, + headers=request_headers_trace, ) assert response.status_code == 500 assert "LookupError" in str(response.data) def test_populate_manifest( - self, client: FlaskClient, valid_test_manifest_csv: str + self, + client: FlaskClient, + valid_test_manifest_csv: str, + request_headers_trace: Dict[str, str], ) -> None: # test manifest test_manifest_data = open(valid_test_manifest_csv, "rb") @@ -860,7 +945,9 @@ def test_populate_manifest( } response = client.get( - "http://localhost:3001/v1/manifest/generate", query_string=params + "http://localhost:3001/v1/manifest/generate", + query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @@ -925,7 
+1012,12 @@ def test_validate_manifest( data = None if test_manifest_fixture: test_manifest_path = request.getfixturevalue(test_manifest_fixture) - data = {"file_name": (open(test_manifest_path, "rb"), "test.csv")} + data = { + "file_name": ( + open(test_manifest_path, "rb"), + f"test_{uuid.uuid4()}.csv", + ) + } # AND the appropriate headers for the test if update_headers: @@ -1001,33 +1093,39 @@ def test_manifest_download( "new_manifest_name": new_manifest_name, "as_json": as_json, } - - response = client.get( - "http://localhost:3001/v1/manifest/download", - query_string=params, - headers=request_headers, + temp_manifest_folder = create_temp_folder( + path=config.manifest_folder, prefix=str(uuid.uuid4()) ) + with patch( + "schematic.store.synapse.create_temp_folder", + return_value=temp_manifest_folder, + ) as mock_create_temp_folder: + response = client.get( + "http://localhost:3001/v1/manifest/download", + query_string=params, + headers=request_headers, + ) + mock_create_temp_folder.assert_called_once() assert response.status_code == 200 # if as_json is set to True or as_json is not defined, then a json gets returned if as_json or as_json is None: - response_dta = json.loads(response.data) + response_data = json.loads(response.data) # check if the correct manifest gets downloaded - assert response_dta[0]["Component"] == expected_component - - current_work_dir = os.getcwd() - folder_test_manifests = config.manifest_folder - folder_dir = os.path.join(current_work_dir, folder_test_manifests) + assert response_data[0]["Component"] == expected_component + assert temp_manifest_folder is not None # if a manfiest gets renamed, get new manifest file path if new_manifest_name: manifest_file_path = os.path.join( - folder_dir, new_manifest_name + "." + "csv" + temp_manifest_folder, new_manifest_name + "." 
+ "csv" ) # if a manifest does not get renamed, get existing manifest file path else: - manifest_file_path = os.path.join(folder_dir, expected_file_name) + manifest_file_path = os.path.join( + temp_manifest_folder, expected_file_name + ) else: # manifest file path gets returned @@ -1081,15 +1179,20 @@ def test_dataset_manifest_download( "as_json": as_json, "new_manifest_name": new_manifest_name, } - - response = client.get( - "http://localhost:3001/v1/dataset/manifest/download", - query_string=params, - headers=request_headers, - ) + try: + response = client.get( + "http://localhost:3001/v1/dataset/manifest/download", + query_string=params, + headers=request_headers, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response.status_code == 200 response_dt = response.data + # TODO: Check assertions if as_json: response_json = json.loads(response_dt) assert response_json[0]["Component"] == "BulkRNA-seqAssay" @@ -1124,15 +1227,25 @@ def test_submit_manifest_table_and_file_replace( "data_model_labels": "class_label", "table_column_names": "class_label", } - - response_csv = client.post( - "http://localhost:3001/v1/model/submit", - query_string=params, - data={"file_name": (open(test_manifest_submit, "rb"), "test.csv")}, - headers=request_headers, - ) + try: + response_csv = client.post( + "http://localhost:3001/v1/model/submit", + query_string=params, + data={ + "file_name": ( + open(test_manifest_submit, "rb"), + f"test_{uuid.uuid4()}.csv", + ) + }, + headers=request_headers, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response_csv.status_code == 200 + @pytest.mark.slow_test @pytest.mark.synapse_credentials_needed @pytest.mark.submission @pytest.mark.parametrize( @@ -1185,12 +1298,19 @@ def test_submit_manifest_file_only_replace( params.update(specific_params) manifest_path = request.getfixturevalue(manifest_path_fixture) - response_csv = client.post( - "http://localhost:3001/v1/model/submit", - query_string=params, - data={"file_name": (open(manifest_path, "rb"), "test.csv")}, - headers=request_headers, - ) + try: + response_csv = client.post( + "http://localhost:3001/v1/model/submit", + query_string=params, + data={ + "file_name": (open(manifest_path, "rb"), f"test_{uuid.uuid4()}.csv") + }, + headers=request_headers, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response_csv.status_code == 200 @pytest.mark.synapse_credentials_needed @@ -1213,12 +1333,17 @@ def test_submit_manifest_json_str_replace( "table_column_names": "class_label", } params["json_str"] = json_str - response = client.post( - "http://localhost:3001/v1/model/submit", - query_string=params, - data={"file_name": ""}, - headers=request_headers, - ) + try: + response = client.post( + "http://localhost:3001/v1/model/submit", + query_string=params, + data={"file_name": ""}, + headers=request_headers, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response.status_code == 200 @pytest.mark.synapse_credentials_needed @@ -1243,14 +1368,25 @@ def test_submit_manifest_w_file_and_entities( } # test 
uploading a csv file - response_csv = client.post( - "http://localhost:3001/v1/model/submit", - query_string=params, - data={"file_name": (open(test_manifest_submit, "rb"), "test.csv")}, - headers=request_headers, - ) + try: + response_csv = client.post( + "http://localhost:3001/v1/model/submit", + query_string=params, + data={ + "file_name": ( + open(test_manifest_submit, "rb"), + f"test_{uuid.uuid4()}.csv", + ) + }, + headers=request_headers, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response_csv.status_code == 200 + @pytest.mark.slow_test @pytest.mark.synapse_credentials_needed @pytest.mark.submission def test_submit_manifest_table_and_file_upsert( @@ -1273,12 +1409,22 @@ def test_submit_manifest_table_and_file_upsert( } # test uploading a csv file - response_csv = client.post( - "http://localhost:3001/v1/model/submit", - query_string=params, - data={"file_name": (open(test_upsert_manifest_csv, "rb"), "test.csv")}, - headers=request_headers, - ) + try: + response_csv = client.post( + "http://localhost:3001/v1/model/submit", + query_string=params, + data={ + "file_name": ( + open(test_upsert_manifest_csv, "rb"), + f"test_{uuid.uuid4()}.csv", + ) + }, + headers=request_headers, + ) + finally: + # Resets the config to its default state + # TODO: remove with https://sagebionetworks.jira.com/browse/SCHEMATIC-202 + CONFIG.load_config("config_example.yml") assert response_csv.status_code == 200 @pytest.mark.synapse_credentials_needed @@ -1307,7 +1453,12 @@ def test_submit_and_validate_filebased_manifest( response_csv = client.post( "http://localhost:3001/v1/model/submit", query_string=params, - data={"file_name": (open(valid_filename_manifest_csv, "rb"), "test.csv")}, + data={ + "file_name": ( + open(valid_filename_manifest_csv, "rb"), + f"test_{uuid.uuid4()}.csv", + ) + }, headers=request_headers, ) @@ -1317,18 +1468,25 @@ def test_submit_and_validate_filebased_manifest( @pytest.mark.schematic_api class TestSchemaVisualization: - def test_visualize_attributes(self, client: FlaskClient) -> None: + def test_visualize_attributes( + self, client: FlaskClient, request_headers_trace: Dict[str, str] + ) -> None: params = {"schema_url": DATA_MODEL_JSON_LD} response = client.get( - "http://localhost:3001/v1/visualize/attributes", query_string=params + "http://localhost:3001/v1/visualize/attributes", + query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @pytest.mark.parametrize("figure_type", ["component", "dependency"]) def test_visualize_tangled_tree_layers( - self, client: FlaskClient, figure_type: str + self, + client: FlaskClient, + figure_type: str, + request_headers_trace: Dict[str, str], ) -> None: # TODO: Determine a 2nd data model to use for this test, test both models sequentially, add checks for content of response params = {"schema_url": DATA_MODEL_JSON_LD, "figure_type": figure_type} @@ -1336,6 +1494,7 @@ def test_visualize_tangled_tree_layers( response = client.get( "http://localhost:3001/v1/visualize/tangled_tree/layers", query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @@ -1436,7 +1595,11 @@ def test_visualize_tangled_tree_layers( ], ) def test_visualize_component( - self, client: FlaskClient, component: str, response_text: str + self, + client: FlaskClient, + component: str, + response_text: str, + request_headers_trace: Dict[str, str], ) -> None: params = { 
"schema_url": DATA_MODEL_JSON_LD, @@ -1446,7 +1609,9 @@ def test_visualize_component( } response = client.get( - "http://localhost:3001/v1/visualize/component", query_string=params + "http://localhost:3001/v1/visualize/component", + query_string=params, + headers=request_headers_trace, ) assert response.status_code == 200 @@ -1487,7 +1652,11 @@ def test_validation_performance( "schema_url": BENCHMARK_DATA_MODEL_JSON_LD, "data_type": "MockComponent", } - headers = {"Content-Type": "multipart/form-data", "Accept": "application/json"} + headers = { + "Content-Type": "multipart/form-data", + "Accept": "application/json", + "traceparent": get_traceparent(), + } # Enforce error rate when possible if MockComponent_attribute == "Check Ages": @@ -1521,7 +1690,12 @@ def test_validation_performance( response = client.post( endpoint_url, query_string=params, - data={"file_name": (open(large_manifest_path, "rb"), "large_test.csv")}, + data={ + "file_name": ( + open(large_manifest_path, "rb"), + f"large_test_{uuid.uuid4()}.csv", + ) + }, headers=headers, ) response_time = perf_counter() - t_start diff --git a/tests/test_cli.py b/tests/test_cli.py index 308f9c73f..a6e4e8ef7 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,14 +1,13 @@ import os - -import pytest from unittest.mock import patch +import pytest from click.testing import CliRunner -from schematic.schemas.commands import schema +from schematic.configuration.configuration import Configuration from schematic.manifest.commands import manifest from schematic.models.commands import model -from schematic.configuration.configuration import Configuration +from schematic.schemas.commands import schema from tests.conftest import Helpers @@ -156,9 +155,9 @@ def test_submit_file_based_manifest( runner: CliRunner, helpers: Helpers, with_annotations: bool, - config: Configuration, ) -> None: manifest_path = helpers.get_data_path("mock_manifests/test_BulkRNAseq.csv") + config = Configuration() config.load_config("config_example.yml") config.synapse_master_fileview_id = "syn1234" diff --git a/tests/test_ge_helpers.py b/tests/test_ge_helpers.py index 6e0b3e01c..8ecaac2e9 100644 --- a/tests/test_ge_helpers.py +++ b/tests/test_ge_helpers.py @@ -8,7 +8,7 @@ from tests.conftest import Helpers -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def mock_ge_helpers( helpers: Helpers, ) -> Generator[GreatExpectationsHelpers, None, None]: @@ -34,39 +34,9 @@ def test_add_expectation_suite_if_not_exists_does_not_exist( """test add_expectation_suite_if_not_exists method when the expectation suite does not exists""" # mock context provided by ge_helpers mock_ge_helpers.context = MagicMock() - mock_ge_helpers.context.list_expectation_suite_names.return_value = [] # Call the method - result = mock_ge_helpers.add_expectation_suite_if_not_exists() + mock_ge_helpers.add_expectation_suite_if_not_exists() # Make sure the method of creating expectation suites if it doesn't exist - mock_ge_helpers.context.list_expectation_suite_names.assert_called_once() - mock_ge_helpers.context.add_expectation_suite.assert_called_once_with( - expectation_suite_name="Manifest_test_suite" - ) - - def test_add_expectation_suite_if_not_exists_does_exist( - self, mock_ge_helpers: Generator[GreatExpectationsHelpers, None, None] - ) -> None: - """test add_expectation_suite_if_not_exists method when the expectation suite does exists""" - # mock context provided by ge_helpers - mock_ge_helpers.context = MagicMock() - 
mock_ge_helpers.context.list_expectation_suite_names.return_value = [ - "Manifest_test_suite" - ] - mock_ge_helpers.context.list_checkpoints.return_value = ["test_checkpoint"] - - # Call the method - result = mock_ge_helpers.add_expectation_suite_if_not_exists() - - # Make sure the method of deleting suites gets called - mock_ge_helpers.context.list_expectation_suite_names.assert_called_once() - mock_ge_helpers.context.delete_expectation_suite.assert_called_once_with( - "Manifest_test_suite" - ) - mock_ge_helpers.context.add_expectation_suite.assert_called_once_with( - expectation_suite_name="Manifest_test_suite" - ) - mock_ge_helpers.context.delete_checkpoint.assert_called_once_with( - "test_checkpoint" - ) + mock_ge_helpers.context.add_expectation_suite.assert_called_once() diff --git a/tests/test_metadata.py b/tests/test_metadata.py index fca1d3db5..66e2a32bf 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -100,6 +100,7 @@ def test_populate_manifest(self, helpers, return_excel, data_model_labels): except: pass + @pytest.mark.slow_test @pytest.mark.parametrize("file_annotations_upload", [True, False]) @pytest.mark.parametrize("restrict_rules", [True, False]) @pytest.mark.parametrize("hide_blanks", [True, False]) diff --git a/tests/test_store.py b/tests/test_store.py index 717b4542e..c9f32bec2 100644 --- a/tests/test_store.py +++ b/tests/test_store.py @@ -15,6 +15,7 @@ import pandas as pd import pytest +from numpy import nan from pandas.testing import assert_frame_equal from synapseclient import EntityViewSchema, Folder from synapseclient.core.exceptions import SynapseHTTPError @@ -27,7 +28,7 @@ from schematic.schemas.data_model_parser import DataModelParser from schematic.store.base import BaseStorage from schematic.store.synapse import DatasetFileView, ManifestDownload, SynapseStorage -from schematic.utils.general import check_synapse_cache_size +from schematic.utils.general import check_synapse_cache_size, create_temp_folder from tests.conftest import Helpers from tests.utils import CleanupItem @@ -124,6 +125,11 @@ def dmge( yield dmge +@pytest.fixture +def mock_file() -> File: + return File(parentId="syn123", id="syn456", name="mock_file") + + @pytest.fixture(scope="module") def synapse_store_special_scope(): yield SynapseStorage(perform_query=False) @@ -986,9 +992,75 @@ def test_tidy_table(self, dataset_fileview_table_tidy): assert isinstance(year_value, str) assert year_value == "1980" + def test_tidy_table_no_manifest_uploaded(self, synapse_store): + """ + Test to ensure that the table can be tidied without issue when a DatasetFileView object is instantiated + based on a dataset that has files annotated but no manifest uploaded. + Covers the case where a user validates a manifest with schematic, and annotates the files with a non-schematic tool (ie the R client), + and then tries to generate a manifest for the dataset with schematic. 
+ """ + # GIVEN a dataset that has files annotated (including the eTag annotation) but no manifest uplodaded + dataset_id = "syn64019998" + + # AND the expected metadata from the files in the dataset + expected_metadata = pd.DataFrame( + { + "Component": { + 0: nan, + 1: "BulkRNA-seqAssay", + 2: "BulkRNA-seqAssay", + 3: "BulkRNA-seqAssay", + 4: "BulkRNA-seqAssay", + }, + "FileFormat": {0: nan, 1: "BAM", 2: "BAM", 3: "BAM", 4: "BAM"}, + "GenomeBuild": { + 0: nan, + 1: "GRCh37", + 2: "GRCh37", + 3: "GRCh37", + 4: "GRCh37", + }, + "entityId": { + 0: "syn64019999", + 1: "syn64020000", + 2: "syn64020001", + 3: "syn64020002", + 4: "syn64020003", + }, + }, + ).set_index("entityId", drop=False) + + # WHEN a DatasetFileView object is instantiated based on the dataset + dataset_fileview = DatasetFileView(dataset_id, synapse_store.syn) + + # AND the fileview is queried without being tidied + table = dataset_fileview.query(tidy=False, force=True) + + # THEN a table should be present + assert isinstance(table, pd.DataFrame) + + # AND the table should not be empty + assert not table.empty + + # AND the table should already include the eTag column that will be removed and saved for comparison later + assert "eTag" in table.columns + original_etag_colum = table.pop("eTag") + + # AND when the table is tidied no exception should be raised + with does_not_raise(): + table = dataset_fileview.tidy_table() + + # AND the eTag column should be different from the original eTag column + new_etag_column = table.pop("eTag").reset_index(drop=True) + assert (new_etag_column != original_etag_colum).all() + + # AND the expected metadata should be present in the table + assert_frame_equal(table, expected_metadata) + @pytest.mark.table_operations class TestTableOperations: + @pytest.mark.slow_test @pytest.mark.parametrize( "table_column_names", ["display_name", "display_label", "class_label"], @@ -1020,20 +1092,41 @@ async def test_create_table( # AND a copy of all the folders in the manifest. 
Added to the dataset directory for easy cleanup manifest = helpers.get_data_frame(manifest_path) - for index, row in manifest.iterrows(): + + async def copy_folder_and_update_manifest( + row: pd.Series, + index: int, + datasetId: str, + synapse_store: SynapseStorage, + manifest: pd.DataFrame, + schedule_for_cleanup: Callable[[CleanupItem], None], + ) -> None: + """Internal function to copy a folder and update the manifest.""" folder_id = row["entityId"] - folder_copy = FolderModel(id=folder_id).copy( + folder_copy = await FolderModel(id=folder_id).copy_async( parent_id=datasetId, synapse_client=synapse_store.syn ) schedule_for_cleanup(CleanupItem(synapse_id=folder_copy.id)) manifest.at[index, "entityId"] = folder_copy.id + tasks = [] + + for index, row in manifest.iterrows(): + tasks.append( + copy_folder_and_update_manifest( + row, index, datasetId, synapse_store, manifest, schedule_for_cleanup + ) + ) + await asyncio.gather(*tasks) + with patch.object( synapse_store, "_generate_table_name", return_value=(table_name, "followup") ), patch.object( synapse_store, "getDatasetProject", return_value=projectId ), tempfile.NamedTemporaryFile( - delete=True, suffix=".csv" + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), ) as tmp_file: # Write the DF to a temporary file to prevent modifying the original manifest.to_csv(tmp_file.name, index=False) @@ -1053,11 +1146,14 @@ async def test_create_table( schedule_for_cleanup(CleanupItem(synapse_id=manifest_id)) # THEN the table should exist - existing_tables = synapse_store.get_table_info(projectId=projectId) + existing_table_id = synapse_store.syn.findEntityId( + name=table_name, parent=projectId + ) # assert table exists - assert table_name in existing_tables.keys() + assert existing_table_id is not None + @pytest.mark.slow_test @pytest.mark.parametrize( "table_column_names", ["display_label", "class_label"], @@ -1090,24 +1186,42 @@ async def test_replace_table( # AND a copy of all the folders in the manifest. 
Added to the dataset directory for easy cleanup manifest = helpers.get_data_frame(manifest_path) replacement_manifest = helpers.get_data_frame(replacement_manifest_path) - for index, row in manifest.iterrows(): + + async def copy_folder_and_update_manifest( + row: pd.Series, + index: int, + datasetId: str, + synapse_store: SynapseStorage, + manifest: pd.DataFrame, + schedule_for_cleanup: Callable[[CleanupItem], None], + ) -> None: + """Internal function to copy a folder and update the manifest.""" folder_id = row["entityId"] - folder_copy = FolderModel(id=folder_id).copy( + folder_copy = await FolderModel(id=folder_id).copy_async( parent_id=datasetId, synapse_client=synapse_store.syn ) schedule_for_cleanup(CleanupItem(synapse_id=folder_copy.id)) manifest.at[index, "entityId"] = folder_copy.id replacement_manifest.at[index, "entityId"] = folder_copy.id - # Check if FollowUp table exists if so delete - existing_tables = synapse_store.get_table_info(projectId=projectId) + tasks = [] + for index, row in manifest.iterrows(): + tasks.append( + copy_folder_and_update_manifest( + row, index, datasetId, synapse_store, manifest, schedule_for_cleanup + ) + ) + + await asyncio.gather(*tasks) with patch.object( synapse_store, "_generate_table_name", return_value=(table_name, "followup") ), patch.object( synapse_store, "getDatasetProject", return_value=projectId ), tempfile.NamedTemporaryFile( - delete=True, suffix=".csv" + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), ) as tmp_file: # Write the DF to a temporary file to prevent modifying the original manifest.to_csv(tmp_file.name, index=False) @@ -1125,10 +1239,9 @@ async def test_replace_table( annotation_keys=annotation_keys, ) schedule_for_cleanup(CleanupItem(synapse_id=manifest_id)) - existing_tables = synapse_store.get_table_info(projectId=projectId) # Query table for DaystoFollowUp column - table_id = existing_tables[table_name] + table_id = synapse_store.syn.findEntityId(name=table_name, parent=projectId) days_to_follow_up = ( synapse_store.syn.tableQuery(f"SELECT {column_of_interest} FROM {table_id}") .asDataFrame() @@ -1143,7 +1256,9 @@ async def test_replace_table( ), patch.object( synapse_store, "getDatasetProject", return_value=projectId ), tempfile.NamedTemporaryFile( - delete=True, suffix=".csv" + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), ) as tmp_file: # Write the DF to a temporary file to prevent modifying the original replacement_manifest.to_csv(tmp_file.name, index=False) @@ -1161,10 +1276,9 @@ async def test_replace_table( annotation_keys=annotation_keys, ) schedule_for_cleanup(CleanupItem(synapse_id=manifest_id)) - existing_tables = synapse_store.get_table_info(projectId=projectId) # Query table for DaystoFollowUp column - table_id = existing_tables[table_name] + table_id = synapse_store.syn.findEntityId(name=table_name, parent=projectId) days_to_follow_up = ( synapse_store.syn.tableQuery(f"SELECT {column_of_interest} FROM {table_id}") .asDataFrame() @@ -1202,7 +1316,9 @@ async def test_upsert_table( ), patch.object( synapse_store, "getDatasetProject", return_value=projectId ), tempfile.NamedTemporaryFile( - delete=True, suffix=".csv" + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), ) as tmp_file: # Copy to a temporary file to prevent modifying the original shutil.copyfile(helpers.get_data_path(manifest_path), tmp_file.name) @@ -1220,10 +1336,9 @@ async def test_upsert_table( annotation_keys=annotation_keys, ) 
schedule_for_cleanup(CleanupItem(synapse_id=manifest_id)) - existing_tables = synapse_store.get_table_info(projectId=projectId) # set primary key annotation for uploaded table - table_id = existing_tables[table_name] + table_id = synapse_store.syn.findEntityId(name=table_name, parent=projectId) # Query table for DaystoFollowUp column table_query = ( @@ -1242,7 +1357,9 @@ async def test_upsert_table( ), patch.object( synapse_store, "getDatasetProject", return_value=projectId ), tempfile.NamedTemporaryFile( - delete=True, suffix=".csv" + delete=True, + suffix=".csv", + dir=create_temp_folder(path=tempfile.gettempdir()), ) as tmp_file: # Copy to a temporary file to prevent modifying the original shutil.copyfile( @@ -1262,10 +1379,9 @@ async def test_upsert_table( annotation_keys=annotation_keys, ) schedule_for_cleanup(CleanupItem(synapse_id=manifest_id)) - existing_tables = synapse_store.get_table_info(projectId=projectId) # Query table for DaystoFollowUp column - table_id = existing_tables[table_name] + table_id = synapse_store.syn.findEntityId(name=table_name, parent=projectId) table_query = ( synapse_store.syn.tableQuery(f"SELECT {column_of_interest} FROM {table_id}") .asDataFrame() @@ -1321,9 +1437,7 @@ def test_get_manifest_id(self, synapse_store, datasetFileView): @pytest.mark.parametrize("newManifestName", ["", "Example"]) def test_download_manifest(self, mock_manifest_download, newManifestName): # test the download function by downloading a manifest - manifest_data = mock_manifest_download.download_manifest( - mock_manifest_download, newManifestName - ) + manifest_data = mock_manifest_download.download_manifest(newManifestName) assert os.path.exists(manifest_data["path"]) if not newManifestName: @@ -1338,7 +1452,7 @@ def test_download_access_restricted_manifest(self, synapse_store): # attempt to download an uncensored manifest that has access restriction. # if the code works correctly, the censored manifest that does not have access restriction would get downloaded (see: syn29862066) md = ManifestDownload(synapse_store.syn, "syn29862066") - manifest_data = md.download_manifest(md) + manifest_data = md.download_manifest() assert os.path.exists(manifest_data["path"]) @@ -1348,7 +1462,7 @@ def test_download_access_restricted_manifest(self, synapse_store): def test_download_manifest_on_aws(self, mock_manifest_download, monkeypatch): # mock AWS environment by providing SECRETS_MANAGER_SECRETS environment variable and attempt to download a manifest monkeypatch.setenv("SECRETS_MANAGER_SECRETS", "mock_value") - manifest_data = mock_manifest_download.download_manifest(mock_manifest_download) + manifest_data = mock_manifest_download.download_manifest() assert os.path.exists(manifest_data["path"]) # clean up @@ -1359,11 +1473,10 @@ def test_entity_type_checking(self, synapse_store, entity_id, caplog): md = ManifestDownload(synapse_store.syn, entity_id) md._entity_type_checking() if entity_id == "syn27600053": - for record in caplog.records: - assert ( - "You are using entity type: folder. Please provide a file ID" - in record.message - ) + assert ( + "You are using entity type: folder. 
Please provide a file ID" + in caplog.text + ) class TestManifestUpload: @@ -1427,6 +1540,7 @@ async def test_add_annotations_to_entities_files( files_in_dataset: str, expected_filenames: list[str], expected_entity_ids: list[str], + mock_file: File, ) -> None: """test adding annotations to entities files @@ -1449,39 +1563,39 @@ async def mock_process_store_annos(requests): with patch( "schematic.store.synapse.SynapseStorage.getFilesInStorageDataset", return_value=files_in_dataset, + ), patch( + "schematic.store.synapse.SynapseStorage.format_row_annotations", + return_value=mock_format_row_annos, + new_callable=AsyncMock, + ) as mock_format_row, patch( + "schematic.store.synapse.SynapseStorage._process_store_annos", + return_value=mock_process_store_annos, + new_callable=AsyncMock, + ) as mock_process_store, patch.object( + synapse_store.synapse_entity_tracker, "get", return_value=mock_file ): - with patch( - "schematic.store.synapse.SynapseStorage.format_row_annotations", - return_value=mock_format_row_annos, - new_callable=AsyncMock, - ) as mock_format_row: - with patch( - "schematic.store.synapse.SynapseStorage._process_store_annos", - return_value=mock_process_store_annos, - new_callable=AsyncMock, - ) as mock_process_store: - manifest_df = pd.DataFrame(original_manifest) - - new_df = await synapse_store.add_annotations_to_entities_files( - dmge, - manifest_df, - manifest_record_type="entity", - datasetId="mock id", - hideBlanks=True, - ) + manifest_df = pd.DataFrame(original_manifest) - file_names_lst = new_df["Filename"].tolist() - entity_ids_lst = new_df["entityId"].tolist() + new_df = await synapse_store.add_annotations_to_entities_files( + dmge, + manifest_df, + manifest_record_type="entity", + datasetId="mock id", + hideBlanks=True, + ) + + file_names_lst = new_df["Filename"].tolist() + entity_ids_lst = new_df["entityId"].tolist() - # test entityId and Id columns get added - assert "entityId" in new_df.columns - assert "Id" in new_df.columns - assert file_names_lst == expected_filenames - assert entity_ids_lst == expected_entity_ids + # test entityId and Id columns get added + assert "entityId" in new_df.columns + assert "Id" in new_df.columns + assert file_names_lst == expected_filenames + assert entity_ids_lst == expected_entity_ids - # make sure async function gets called as expected - assert mock_format_row.call_count == len(expected_entity_ids) - assert mock_process_store.call_count == 1 + # make sure async function gets called as expected + assert mock_format_row.call_count == len(expected_entity_ids) + assert mock_process_store.call_count == 1 @pytest.mark.parametrize( "mock_manifest_file_path", @@ -1495,6 +1609,7 @@ def test_upload_manifest_file( helpers: Helpers, synapse_store: SynapseStorage, mock_manifest_file_path: str, + mock_file: File, ) -> None: """test upload manifest file function @@ -1523,9 +1638,9 @@ def test_upload_manifest_file( "entityId": {0: "syn1224", 1: "syn1225", 2: "syn1226"}, } ) - with patch("synapseclient.Synapse.store") as syn_store_mock, patch( - "schematic.store.synapse.synapseutils.copy_functions.changeFileMetaData" - ) as mock_change_file_metadata: + with patch("synapseclient.Synapse.store") as syn_store_mock, patch.object( + synapse_store.synapse_entity_tracker, "get", return_value=mock_file + ): syn_store_mock.return_value.id = "mock manifest id" mock_component_name = "BulkRNA-seqAssay" mock_file_path = helpers.get_data_path(mock_manifest_file_path) @@ -1536,20 +1651,8 @@ def test_upload_manifest_file( restrict_manifest=True, 
component_name=mock_component_name, ) - if "censored" in mock_manifest_file_path: - file_name = ( - f"synapse_storage_manifest_{mock_component_name}_censored.csv" - ) - else: - file_name = f"synapse_storage_manifest_{mock_component_name}.csv" assert mock_manifest_synapse_file_id == "mock manifest id" - mock_change_file_metadata.assert_called_once_with( - forceVersion=False, - syn=synapse_store.syn, - entity=syn_store_mock.return_value.id, - downloadAs=file_name, - ) @pytest.mark.parametrize("file_annotations_upload", [True, False]) @pytest.mark.parametrize("hide_blanks", [True, False]) @@ -1564,6 +1667,7 @@ def test_upload_manifest_as_csv( manifest_record_type: str, hide_blanks: bool, restrict: bool, + mock_file: File, ) -> None: async def mock_add_annotations_to_entities_files(): return @@ -1582,6 +1686,9 @@ async def mock_add_annotations_to_entities_files(): "schematic.store.synapse.SynapseStorage.format_manifest_annotations" ) as format_manifest_anno_mock, patch.object(synapse_store.syn, "set_annotations"), + patch.object( + synapse_store.synapse_entity_tracker, "get", return_value=mock_file + ), ): manifest_path = helpers.get_data_path("mock_manifests/test_BulkRNAseq.csv") manifest_df = helpers.get_data_frame(manifest_path) @@ -1618,6 +1725,7 @@ def test_upload_manifest_as_table( hide_blanks: bool, restrict: bool, manifest_record_type: str, + mock_file: File, ) -> None: mock_df = pd.DataFrame() @@ -1642,6 +1750,9 @@ async def mock_add_annotations_to_entities_files(): patch( "schematic.store.synapse.SynapseStorage.format_manifest_annotations" ) as format_manifest_anno_mock, + patch.object( + synapse_store.synapse_entity_tracker, "get", return_value=mock_file + ), ): manifest_path = helpers.get_data_path("mock_manifests/test_BulkRNAseq.csv") manifest_df = helpers.get_data_frame(manifest_path) @@ -1682,6 +1793,7 @@ def test_upload_manifest_combo( hide_blanks: bool, restrict: bool, manifest_record_type: str, + mock_file: File, ) -> None: mock_df = pd.DataFrame() manifest_path = helpers.get_data_path("mock_manifests/test_BulkRNAseq.csv") @@ -1708,6 +1820,9 @@ async def mock_add_annotations_to_entities_files(): patch( "schematic.store.synapse.SynapseStorage.format_manifest_annotations" ) as format_manifest_anno_mock, + patch.object( + synapse_store.synapse_entity_tracker, "get", return_value=mock_file + ), ): synapse_store.upload_manifest_combo( dmge, @@ -1756,6 +1871,7 @@ def test_associate_metadata_with_files( expected: str, file_annotations_upload: bool, dmge: DataModelGraphExplorer, + mock_file: File, ) -> None: with ( patch( @@ -1770,6 +1886,9 @@ def test_associate_metadata_with_files( "schematic.store.synapse.SynapseStorage.upload_manifest_combo", return_value="mock_id_entities", ), + patch.object( + synapse_store.synapse_entity_tracker, "get", return_value=mock_file + ), ): manifest_path = "mock_manifests/test_BulkRNAseq.csv" manifest_id = synapse_store.associateMetadataWithFiles( diff --git a/tests/test_utils.py b/tests/test_utils.py index 5b37abe6e..2a0744439 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -2,50 +2,30 @@ import json import logging import os -import shutil import tempfile import time from datetime import datetime -from unittest import mock from pathlib import Path -from typing import Union, Generator -from _pytest.fixtures import FixtureRequest - +from typing import Generator, Union import numpy as np import pandas as pd import pytest -import synapseclient import synapseclient.core.cache as cache +from _pytest.fixtures import FixtureRequest from 
pandas.testing import assert_frame_equal from synapseclient.core.exceptions import SynapseHTTPError -from schematic.models.validate_manifest import ValidateManifest from schematic.models.metadata import MetadataModel - -from schematic.schemas.data_model_parser import DataModelParser -from schematic.schemas.data_model_graph import DataModelGraph, DataModelGraphExplorer +from schematic.models.validate_manifest import ValidateManifest +from schematic.schemas.data_model_graph import DataModelGraph +from schematic.schemas.data_model_json_schema import DataModelJSONSchema from schematic.schemas.data_model_jsonld import ( - DataModelJsonLD, - BaseTemplate, - PropertyTemplate, ClassTemplate, + PropertyTemplate, + convert_graph_to_jsonld, ) -from schematic.schemas.data_model_json_schema import DataModelJSONSchema - -from schematic.schemas.data_model_relationships import DataModelRelationships -from schematic.schemas.data_model_jsonld import DataModelJsonLD, convert_graph_to_jsonld - -from schematic.exceptions import ( - MissingConfigValueError, - MissingConfigAndArgumentValueError, -) -from schematic import LOADER -from schematic.exceptions import ( - MissingConfigAndArgumentValueError, - MissingConfigValueError, -) - +from schematic.schemas.data_model_parser import DataModelParser from schematic.utils import cli_utils, df_utils, general, io_utils, validate_utils from schematic.utils.df_utils import load_df from schematic.utils.general import ( @@ -55,25 +35,23 @@ entity_type_mapping, ) from schematic.utils.schema_utils import ( + check_for_duplicate_components, + check_if_display_name_is_valid_label, export_schema, - get_property_label_from_display_name, + extract_component_validation_rules, get_class_label_from_display_name, - strip_context, + get_component_name_rules, + get_individual_rules, + get_json_schema_log_file_path, get_label_from_display_name, + get_property_label_from_display_name, get_schema_label, get_stripped_label, - check_if_display_name_is_valid_label, - get_individual_rules, - get_component_name_rules, - parse_component_validation_rules, parse_single_set_validation_rules, parse_validation_rules, - extract_component_validation_rules, - check_for_duplicate_components, - get_json_schema_log_file_path, + strip_context, ) - logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) @@ -1099,6 +1077,7 @@ def test_validate_property_schema(self, helpers): assert error is None + @pytest.mark.single_process_execution @pytest.mark.parametrize( ("manifest", "model", "root_node"), [ diff --git a/tests/test_validation.py b/tests/test_validation.py index cdd6766c0..c16a95bb7 100644 --- a/tests/test_validation.py +++ b/tests/test_validation.py @@ -36,10 +36,6 @@ def get_rule_combinations(): class TestManifestValidation: - # check if suite has been created. 
If so, delete it - if os.path.exists("great_expectations/expectations/Manifest_test_suite.json"): - os.remove("great_expectations/expectations/Manifest_test_suite.json") - @pytest.mark.parametrize( ("model_name", "manifest_name", "root_node"), [ diff --git a/tests/test_validator.py b/tests/test_validator.py deleted file mode 100644 index 6f5bd95f0..000000000 --- a/tests/test_validator.py +++ /dev/null @@ -1,122 +0,0 @@ -from io import StringIO -import json -import networkx as nx -import os -import pandas as pd -import pytest -import logging - - -from schematic.schemas.data_model_parser import DataModelParser -from schematic.schemas.data_model_graph import DataModelGraph -from schematic.schemas.data_model_validator import DataModelValidator -from schematic.schemas.data_model_jsonld import DataModelJsonLD, convert_graph_to_jsonld - - -logging.basicConfig(level=logging.DEBUG) -logger = logging.getLogger(__name__) - - -def graph_data_model_func(helpers, data_model_name): - path_to_data_model = helpers.get_data_path(data_model_name) - - # Instantiate Parser - data_model_parser = DataModelParser(path_to_data_model=path_to_data_model) - - # Parse Model - parsed_data_model = data_model_parser.parse_model() - - # Convert parsed model to graph - # Instantiate DataModelGraph - data_model_grapher = DataModelGraph(parsed_data_model) - - # Generate graph - graph_data_model = data_model_grapher.graph - - return graph_data_model - - -class TestDataModelValidator: - def test_check_blacklisted_characters(self, helpers): - # Get graph data model - graph_data_model = graph_data_model_func( - helpers, data_model_name="validator_test.model.csv" - ) - - # Instantiate Data Model Validator - DMV = DataModelValidator(graph_data_model) - - # Run validation - validator_errors = DMV.check_blacklisted_characters() - - # Expected Error - expected_error = [ - "Node: Patient) contains a blacklisted character(s): ), they will be striped if used in Synapse annotations.", - "Node: Patient ID. contains a blacklisted character(s): ., they will be striped if used in Synapse annotations.", - "Node: Sex- contains a blacklisted character(s): -, they will be striped if used in Synapse annotations.", - "Node: Year of Birth( contains a blacklisted character(s): (, they will be striped if used in Synapse annotations.", - "Node: Bulk RNA-seq Assay contains a blacklisted character(s): -, they will be striped if used in Synapse annotations.", - ] - - assert expected_error == validator_errors - - def test_check_reserved_names(self, helpers): - # Get graph data model - graph_data_model = graph_data_model_func( - helpers, data_model_name="validator_test.model.csv" - ) - - # Instantiate Data Model Validator - DMV = DataModelValidator(graph_data_model) - - # Run validation - validator_errors = DMV.check_reserved_names() - - # Expected Error - expected_error = [ - "Your data model entry name: EntityId overlaps with the reserved name: entityId. Please change this name in your data model." 
- ] - assert expected_error == validator_errors - - def test_check_graph_has_required_node_fields(self, helpers): - # Get graph data model - graph_data_model = graph_data_model_func( - helpers, data_model_name="validator_test.model.csv" - ) - - # Remove a field from an entry graph - del graph_data_model.nodes["Cancer"]["label"] - - # Instantiate Data Model Validator - DMV = DataModelValidator(graph_data_model) - - # Run validation - validator_errors = DMV.check_graph_has_required_node_fields() - - # Expected Error - expected_error = [ - "For entry: Cancer, the required field label is missing in the data model graph, please double check your model and generate the graph again." - ] - assert expected_error == validator_errors - - def test_dag(self, helpers): - # TODO: The schema validator currently doesn't catch the Diagnosis-Diagnosis self loop. - # It is an expected error but it will need to be decided if the validator should prevent or allow such self loops - - # Get graph data model - graph_data_model = graph_data_model_func( - helpers, data_model_name="validator_dag_test.model.csv" - ) - - # Instantiate Data Model Validator - DMV = DataModelValidator(graph_data_model) - - # Run validation - validator_errors = DMV.check_is_dag() - - # nodes could be in different order so need to account for that - expected_errors = [ - "Schematic requires models be a directed acyclic graph (DAG). Please inspect your model." - ] - - assert validator_errors[0] in expected_errors diff --git a/tests/unit/test_data_model_validator.py b/tests/unit/test_data_model_validator.py new file mode 100644 index 000000000..33ea63575 --- /dev/null +++ b/tests/unit/test_data_model_validator.py @@ -0,0 +1,407 @@ +"""Unit testing for the DataModelValidator class""" + +from typing import Generator, Any, Tuple, Iterable +import pytest + +from networkx import MultiDiGraph # type: ignore + +from schematic.schemas.data_model_validator import ( + DataModelValidator, + Node, + get_node_labels_from, + get_missing_fields_from, + check_characters_in_node_display_name, + match_node_names_with_reserved_names, + create_reserve_name_error_messages, + create_blacklisted_characters_error_message, + create_missing_fields_error_messages, +) +from schematic.schemas.data_model_parser import DataModelParser +from schematic.schemas.data_model_graph import DataModelGraph + +# pylint: disable=protected-access + + +@pytest.fixture(name="test_dmv") +def fixture_test_dmv() -> Generator[MultiDiGraph, None, None]: + """Yield a DataModelValidator object using test data model""" + path_to_data_model = "tests/data/validator_test.model.csv" + data_model_parser = DataModelParser(path_to_data_model=path_to_data_model) + parsed_data_model = data_model_parser.parse_model() + + # Convert parsed model to graph + data_model_grapher = DataModelGraph(parsed_data_model) + + # Generate graph + graph_data_model = data_model_grapher.graph + + yield DataModelValidator(graph_data_model) + + +@pytest.fixture(name="test_dmv_with_missing_field") +def fixture_test_dmv_with_missing_field() -> Generator[MultiDiGraph, None, None]: + """Yield a DataModelValidator object using test data model""" + path_to_data_model = "tests/data/validator_test.model.csv" + data_model_parser = DataModelParser(path_to_data_model=path_to_data_model) + parsed_data_model = data_model_parser.parse_model() + + # Convert parsed model to graph + data_model_grapher = DataModelGraph(parsed_data_model) + + # Generate graph + graph_data_model = data_model_grapher.graph + + # remove needed field to trigger an
error message + del graph_data_model.nodes["Cancer"]["label"] + yield DataModelValidator(graph_data_model) + + +@pytest.fixture(name="test_dmv_not_acyclic") +def fixture_test_dmv_not_acyclic() -> Generator[MultiDiGraph, None, None]: + """Yield a DataModelValidator object using test data model""" + path_to_data_model = "tests/data/validator_dag_test.model.csv" + data_model_parser = DataModelParser(path_to_data_model=path_to_data_model) + parsed_data_model = data_model_parser.parse_model() + + # Convert parsed model to graph + data_model_grapher = DataModelGraph(parsed_data_model) + + # Generate graph + graph_data_model = data_model_grapher.graph + + # remove needed field to trigger an error message + del graph_data_model.nodes["Cancer"]["label"] + yield DataModelValidator(graph_data_model) + + +@pytest.fixture(name="empty_dmv") +def fixture_empty_dmv() -> Generator[DataModelValidator, None, None]: + """Yield an empty DataModelValidator object""" + yield DataModelValidator(MultiDiGraph()) + + +class TestNode: # pylint: disable=too-few-public-methods + """Testing for node class""" + + def test_init(self) -> None: + """Test for Node.__init__""" + assert Node("x", {"displayName": "y"}) + # Nodes must have a 'displayName' key in their fields dict + with pytest.raises(ValueError): + Node("x", {}) + + +class TestDataModelValidatorHelpers: + """Testing for DataModelValidator helper functions""" + + @pytest.mark.parametrize( + "input_dict, expected_list", + [ + # These return empty lists + ({}, []), + ({"x": "y"}, []), + ({"x": {}}, []), + # Only values that are dicts are parsed. + # Any dicts with keys named node_label, the value is collected + ({"x": {"node_label": "A"}}, ["A"]), + ({"x": {"node_label": "A"}, "y": {"node_label": "B"}}, ["A", "B"]), + ], + ) + def test_get_node_labels_from(self, input_dict: dict, expected_list: list) -> None: + """Tests for get_node_labels_from""" + assert get_node_labels_from(input_dict) == expected_list + + @pytest.mark.parametrize( + "input_nodes, input_fields, expected_list", + [ + # If there are no nodes or no required fields, nothing will be returned + ([], [], []), + ([], ["field1"], []), + ([Node("node1", {"displayName": "x"})], [], []), + # For each node, if it has all required fields, nothing will be returned + ([Node("node1", {"displayName": "x"})], ["displayName"], []), + ( + [ + Node("node1", {"displayName": "x", "field2": "y"}), + Node("node2", {"displayName": "x", "field2": "y"}), + ], + ["field2"], + [], + ), + # For each node, if it is missing a required field, it is returned + ( + [Node("node1", {"displayName": "x"})], + ["field1", "field2"], + [("node1", "field1"), ("node1", "field2")], + ), + ], + ) + def test_get_missing_fields_from( + self, + input_nodes: list[Node], + input_fields: list, + expected_list: list[Tuple[Any, Any]], + ) -> None: + """Tests for get_missing_fields_from""" + assert get_missing_fields_from(input_nodes, input_fields) == expected_list + + @pytest.mark.parametrize( + "input_tuples, expected_msgs", + [ + # If there are either no nodes, or no reserved names, nothing is returned + ([], []), + ( + [("node1", "field1")], + [ + ( + "For entry: node1, the required field field1 is missing in the data " + "model graph, please double check your model and generate the graph again." + ) + ], + ), + ( + [("node1", "field1"), ("node1", "field2")], + [ + ( + "For entry: node1, the required field field1 is missing in the data model " + "graph, please double check your model and generate the graph again." 
+ ), + ( + "For entry: node1, the required field field2 is missing in the data model " + "graph, please double check your model and generate the graph again." + ), + ], + ), + ], + ) + def test_create_missing_fields_error_messages( + self, input_tuples: list[Tuple[str, str]], expected_msgs: list[str] + ) -> None: + """Tests for create_missing_fields_error_messages""" + assert create_missing_fields_error_messages(input_tuples) == expected_msgs + + @pytest.mark.parametrize( + "input_nodes, input_chars", + [ + # If there are no nodes or blacklisted characters, nothing will be returned + ([], []), + # If all nodes have are formatted correctly, and the 'displayName' field has + # no black listed characters, nothing will be returned + ([Node("node1", {"displayName": "x"})], []), + ([Node("node1", {"displayName": "x"})], ["y"]), + ], + ) + def test_check_characters_in_node_display_name_no_output( + self, + input_nodes: list[Node], + input_chars: list[str], + ) -> None: + """Tests for check_characters_in_node_display_name""" + assert not check_characters_in_node_display_name(input_nodes, input_chars) + + @pytest.mark.parametrize( + "input_nodes, input_chars", + [ + # If all nodes have are formatted correctly, and the 'displayName' field has + # black listed characters, those will be returned + ([Node("node1", {"displayName": "xyz"})], ["x"]), + ([Node("node1", {"displayName": "xyz"})], ["x", "y"]), + ([Node("node1", {"displayName": "xyz"})], ["x", "y", "a"]), + ], + ) + def test_check_characters_in_node_display_name_with_output( + self, + input_nodes: list[Node], + input_chars: list[str], + ) -> None: + """Tests for check_characters_in_node_display_name""" + assert check_characters_in_node_display_name(input_nodes, input_chars) + + @pytest.mark.parametrize( + "input_chars, input_name, expected_msg", + [ + ( + [], + "", + ( + "Node: contains a blacklisted character(s): , they will be striped if " + "used in Synapse annotations." + ), + ), + ( + ["x", "y"], + "node1", + ( + "Node: node1 contains a blacklisted character(s): x,y, they will be striped " + "if used in Synapse annotations." + ), + ), + ], + ) + def test_create_blacklisted_characters_error_msg( + self, input_chars: list[str], input_name: str, expected_msg: str + ) -> None: + """Tests for create_blacklisted_characters_error_msg""" + assert ( + create_blacklisted_characters_error_message(input_chars, input_name) + == expected_msg + ) + + @pytest.mark.parametrize( + "input_nodes, input_names, expected_list", + [ + # If there are either no nodes, or no reserved names, nothing is returned + ([], [], []), + (["node1"], [], []), + ([], ["x"], []), + # If there are matches between a node name and a reserved name (after lowering + # the case of both) return any matches + (["node1"], ["node1"], [("node1", "node1")]), + (["Node1"], ["node1"], [("node1", "Node1")]), + (["node1"], ["Node1"], [("Node1", "node1")]), + ], + ) + def test_match_node_names_with_reserved_names( + self, + input_nodes: Iterable[str], + input_names: Iterable[str], + expected_list: list[Tuple[str, str]], + ) -> None: + """Tests for match_node_names_with_reserved_names""" + assert ( + match_node_names_with_reserved_names(input_nodes, input_names) + == expected_list + ) + + @pytest.mark.parametrize( + "input_tuples, expected_msgs", + [ + # If there are either no nodes, or no reserved names, nothing is returned + ([], []), + ( + [("node1", "Node1")], + [ + ( + "Your data model entry name: Node1 overlaps with the reserved name: node1. 
" + "Please change this name in your data model." + ) + ], + ), + ( + [("node1", "Node1"), ("node2", "Node2")], + [ + ( + "Your data model entry name: Node1 overlaps with the reserved name: node1. " + "Please change this name in your data model." + ), + ( + "Your data model entry name: Node2 overlaps with the reserved name: node2. " + "Please change this name in your data model." + ), + ], + ), + ], + ) + def test_create_reserve_name_error_msgs( + self, input_tuples: list[Tuple[str, str]], expected_msgs: list[str] + ) -> None: + """Tests for create_reserve_name_error_msgs""" + assert create_reserve_name_error_messages(input_tuples) == expected_msgs + + +class TestDataModelValidator: + """Testing for DataModelValidator class""" + + def test_run_checks( + self, test_dmv: DataModelValidator, empty_dmv: DataModelValidator + ) -> None: + """Tests for DataModelValidator.run_checks""" + errors, warnings = test_dmv.run_checks() + assert errors + assert warnings + errors, warnings = empty_dmv.run_checks() + assert not errors + assert not warnings + + def test__run_cycles( + self, test_dmv: DataModelValidator, test_dmv_not_acyclic: DataModelValidator + ) -> None: + """Tests for DataModelValidator._run_cycles""" + test_dmv._run_cycles() + test_dmv_not_acyclic._run_cycles() + + def test__check_is_dag( + self, test_dmv: DataModelValidator, test_dmv_not_acyclic: DataModelValidator + ) -> None: + """Tests for DataModelValidator._check_is_dag""" + errors = test_dmv._check_is_dag() + assert not errors + errors = test_dmv_not_acyclic._check_is_dag() + # This test doesn't cover all of this method please see + # https://sagebionetworks.jira.com/browse/FDS-2529 + assert errors == [ + ( + "Schematic requires models be a directed acyclic graph (DAG). " + "Please inspect your model." + ) + ] + + def test__check_graph_has_required_node_fields( + self, + test_dmv: DataModelValidator, + test_dmv_with_missing_field: DataModelValidator, + ) -> None: + """Tests for DataModelValidator._check_graph_has_required_node_fields""" + errors = test_dmv._check_graph_has_required_node_fields() + assert not errors + errors = test_dmv_with_missing_field._check_graph_has_required_node_fields() + assert errors == [ + ( + "For entry: Cancer, the required field label is missing in the data model graph, " + "please double check your model and generate the graph again." + ) + ] + + def test__check_blacklisted_characters( + self, test_dmv: DataModelValidator, empty_dmv: DataModelValidator + ) -> None: + """Tests for DataModelValidator._check_blacklisted_characters""" + errors = test_dmv._check_blacklisted_characters() + assert errors == [ + ( + "Node: Patient) contains a blacklisted character(s): ), " + "they will be striped if used in Synapse annotations." + ), + ( + "Node: Patient ID. contains a blacklisted character(s): ., " + "they will be striped if used in Synapse annotations." + ), + ( + "Node: Sex- contains a blacklisted character(s): -, " + "they will be striped if used in Synapse annotations." + ), + ( + "Node: Year of Birth( contains a blacklisted character(s): (, " + "they will be striped if used in Synapse annotations." + ), + ( + "Node: Bulk RNA-seq Assay contains a blacklisted character(s): -, " + "they will be striped if used in Synapse annotations." 
+ ), + ] + errors = empty_dmv._check_blacklisted_characters() + assert not errors + + def test__check_reserved_names( + self, test_dmv: DataModelValidator, empty_dmv: DataModelValidator + ) -> None: + """Tests for DataModelValidator._check_reserved_names""" + errors = test_dmv._check_reserved_names() + assert errors == [ + ( + "Your data model entry name: EntityId overlaps with the reserved name: entityId. " + "Please change this name in your data model." + ) + ] + errors = empty_dmv._check_reserved_names() + assert not errors diff --git a/tests/unit/test_io_utils.py b/tests/unit/test_io_utils.py new file mode 100644 index 000000000..ce868aac3 --- /dev/null +++ b/tests/unit/test_io_utils.py @@ -0,0 +1,96 @@ +import asyncio +import os +import tempfile + +from schematic.utils.general import create_temp_folder +from schematic.utils.io_utils import cleanup_temporary_storage + + +class TestCleanup: + async def test_cleanup_temporary_storage_nothing_to_cleanup(self) -> None: + # GIVEN a temporary folder that has a file that is not older than the time delta + temp_folder = create_temp_folder(path=tempfile.gettempdir()) + + # AND A File that is not older than the time delta + with open(os.path.join(temp_folder, "file.txt"), "w") as f: + f.write("hello world") + + assert os.path.exists(temp_folder) + assert os.path.exists(os.path.join(temp_folder, "file.txt")) + + time_delta_seconds = 3600 + + # WHEN I call the cleanup function + cleanup_temporary_storage( + temporary_storage_directory=temp_folder, + time_delta_seconds=time_delta_seconds, + ) + + # THEN the folder should still exist + assert os.path.exists(temp_folder) + + # AND the file should still exist + assert os.path.exists(os.path.join(temp_folder, "file.txt")) + + async def test_cleanup_temporary_storage_file_to_cleanup(self) -> None: + # GIVEN a temporary folder that has a file that will be older than the time delta + temp_folder = create_temp_folder(path=tempfile.gettempdir()) + + # AND A File that is older than the time delta + with open(os.path.join(temp_folder, "file.txt"), "w") as f: + f.write("hello world") + + assert os.path.exists(temp_folder) + assert os.path.exists(os.path.join(temp_folder, "file.txt")) + + time_delta_seconds = 1 + + # AND I wait for the time delta + await asyncio.sleep(time_delta_seconds) + + # WHEN I call the cleanup function + cleanup_temporary_storage( + temporary_storage_directory=temp_folder, + time_delta_seconds=time_delta_seconds, + ) + + # THEN the folder should still exist + assert os.path.exists(temp_folder) + + # AND the file should not exist + assert not os.path.exists(os.path.join(temp_folder, "file.txt")) + + async def test_cleanup_temporary_storage_nested_file_to_cleanup(self) -> None: + # GIVEN a temporary folder that has a file that will be older than the time delta + temp_folder = create_temp_folder(path=tempfile.gettempdir()) + + # AND a nested temporary folder + temp_folder_2 = create_temp_folder(path=temp_folder) + + # AND A File that is older than the time delta + with open(os.path.join(temp_folder_2, "file.txt"), "w") as f: + f.write("hello world") + + assert os.path.exists(temp_folder) + assert os.path.exists(temp_folder_2) + assert os.path.exists(os.path.join(temp_folder_2, "file.txt")) + + time_delta_seconds = 1 + + # AND I wait for the time delta + await asyncio.sleep(time_delta_seconds) + + # WHEN I call the cleanup function + cleanup_temporary_storage( + temporary_storage_directory=temp_folder, + time_delta_seconds=time_delta_seconds, + ) + + # THEN the folder should still exist + 
assert os.path.exists(temp_folder) + + # AND the nested folder should not exist + assert not os.path.exists(temp_folder_2) + + # AND the file should not exist + assert not os.path.exists(os.path.join(temp_folder_2, "file.txt")) diff --git a/tests/unit/test_validate_attribute.py b/tests/unit/test_validate_attribute.py index d782fab12..4e05ddc98 100644 --- a/tests/unit/test_validate_attribute.py +++ b/tests/unit/test_validate_attribute.py @@ -3,9 +3,10 @@ from typing import Generator from unittest.mock import Mock, patch +from jsonschema import ValidationError +from pandas import Series, DataFrame, concat import numpy as np import pytest -from pandas import DataFrame, Series, concat import schematic.models.validate_attribute from schematic.models.validate_attribute import GenerateError, ValidateAttribute @@ -73,6 +74,15 @@ } ) +TEST_DF3 = DataFrame( + { + "PatientID": ["A", "A", "A", "B", "C"], + "component": ["comp1", "comp1", "comp1", "comp1", "comp1"], + "id": ["id1", "id2", "id3", "id4", "id5"], + "entityid": ["x", "x", "x", "x", "x"], + } +) + TEST_DF_MISSING_VALS = DataFrame( { "PatientID": [np.isnan, ""], @@ -155,10 +165,10 @@ def fixture_va_obj( yield ValidateAttribute(dmge) -@pytest.fixture(name="cross_val_df1") -def fixture_cross_val_df1() -> Generator[DataFrame, None, None]: +@pytest.fixture(name="test_df1") +def fixture_test_df1() -> Generator[DataFrame, None, None]: """Yields a dataframe""" - df = DataFrame( + yield DataFrame( { "PatientID": ["A", "B", "C"], "component": ["comp1", "comp1", "comp1"], @@ -166,44 +176,10 @@ def fixture_cross_val_df1() -> Generator[DataFrame, None, None]: "entityid": ["x", "x", "x"], } ) - yield df - - -@pytest.fixture(name="cross_val_df2") -def fixture_cross_val_df2(cross_val_df1: DataFrame) -> Generator[DataFrame, None, None]: - """Yields dataframe df1 with an extra row""" - df = concat( - [ - cross_val_df1, - DataFrame( - { - "PatientID": ["D"], - "component": ["comp1"], - "id": ["id4"], - "entityid": ["x"], - } - ), - ] - ) - yield df - - -@pytest.fixture(name="cross_val_df3") -def fixture_cross_val_df3() -> Generator[DataFrame, None, None]: - """Yields empty dataframe""" - df = DataFrame( - { - "PatientID": [], - "component": [], - "id": [], - "entityid": [], - } - ) - yield df -@pytest.fixture(name="cross_val_col_names") -def fixture_cross_val_col_names() -> Generator[dict[str, str], None, None]: +@pytest.fixture(name="test_df_col_names") +def fixture_test_df_col_names() -> Generator[dict[str, str], None, None]: """ Yields: Generator[dict[str, str], None, None]: A dicitonary of column names @@ -304,6 +280,104 @@ def test_generate_filename_error_unsupported_error_type( error_type="unsupported error type", ) + @pytest.mark.parametrize( + "input_rule, input_num, input_name, input_entry, expected_error, expected_warning", + [ + ( + "x", + 0, + "Patient", + "y", + [], + [ + 0, + "Patient", + "On row 0 the attribute Patient does not contain the proper value type x.", + "y", + ], + ), + ( + "x warning", + 0, + "Patient", + "y", + [], + [ + 0, + "Patient", + "On row 0 the attribute Patient does not contain the proper value type x.", + "y", + ], + ), + ( + "x error", + 0, + "Patient", + "y", + [ + 0, + "Patient", + "On row 0 the attribute Patient does not contain the proper value type x.", + "y", + ], + [], + ), + ], + ) + def test_generate_type_error( + self, + dmge: DataModelGraphExplorer, + input_rule: str, + input_num: int, + input_name: str, + input_entry: str, + expected_error: list[str], + expected_warning: list[str], + ) -> None: + """Testing 
for GenerateError.generate_type_error""" + error, warning = GenerateError.generate_type_error( + val_rule=input_rule, + row_num=input_num, + attribute_name=input_name, + invalid_entry=input_entry, + dmge=dmge, + ) + assert error == expected_error + assert warning == expected_warning + + @pytest.mark.parametrize( + "input_rule, input_num, input_name, input_entry, exception", + [ + # Empty rule or entry causes a key error + ("", 0, "x", "x", KeyError), + ("x", 0, "x", "", KeyError), + # Empty attribute causes an index error + ("x", 0, "", "x", IndexError), + ], + ) + def test_generate_type_error_exceptions( + self, + dmge: DataModelGraphExplorer, + input_rule: str, + input_num: int, + input_name: str, + input_entry: str, + exception: Exception, + ) -> None: + """Testing for GenerateError.generate_type_error""" + with pytest.raises(exception): + GenerateError.generate_type_error( + val_rule=input_rule, + row_num=input_num, + attribute_name=input_name, + invalid_entry=input_entry, + dmge=dmge, + ) + class TestValidateAttributeObject: """Testing for ValidateAttribute class with all Synapse calls mocked""" @@ -314,10 +388,10 @@ class TestValidateAttributeObject: @pytest.mark.parametrize("series", EXACTLY_ATLEAST_PASSING_SERIES) @pytest.mark.parametrize("rule", MATCH_ATLEAST_ONE_SET_RULES) - def test_cross_validation_match_atleast_one_set_rules_passing( + def test_cross_validation_match_atleast_one_set_passing_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, series: Series, rule: str, ): @@ -328,16 +402,16 @@ def test_cross_validation_match_atleast_one_set_rules_passing( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): assert va_obj.cross_validation(rule, series) == ([], []) @pytest.mark.parametrize("series", EXACTLY_ATLEAST_PASSING_SERIES) @pytest.mark.parametrize("rule", MATCH_EXACTLY_ONE_SET_RULES) - def test_cross_validation_match_exactly_one_set_rules_passing( + def test_cross_validation_match_exactly_one_set_passing_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, series: Series, rule: str, ): @@ -348,7 +422,7 @@ def test_cross_validation_match_exactly_one_set_rules_passing( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): assert va_obj.cross_validation(rule, series) == ([], []) @@ -362,10 +436,10 @@ def test_cross_validation_match_exactly_one_set_rules_passing( ], ) @pytest.mark.parametrize("rule", MATCH_ATLEAST_ONE_SET_RULES) - def test_cross_validation_match_atleast_one_set_rules_errors( + def test_cross_validation_match_atleast_one_set_errors_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, series: Series, rule: str, ): @@ -376,7 +450,7 @@ def test_cross_validation_match_atleast_one_set_rules_errors( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): errors, warnings = va_obj.cross_validation(rule, series) if rule.endswith("error"): @@ -394,10 +468,10 @@ def test_cross_validation_match_atleast_one_set_rules_errors( ], ) @pytest.mark.parametrize("rule", MATCH_EXACTLY_ONE_SET_RULES) - def
test_cross_validation_match_exactly_one_set_rules_errors( + def test_cross_validation_match_exactly_one_set_errors_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, series: Series, rule: str, ): @@ -408,7 +482,7 @@ def test_cross_validation_match_exactly_one_set_rules_errors( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1, "syn2": cross_val_df1}, + return_value={"syn1": test_df1, "syn2": test_df1}, ): errors, warnings = va_obj.cross_validation(rule, series) if rule.endswith("error"): @@ -429,10 +503,10 @@ def test_cross_validation_match_exactly_one_set_rules_errors( ], ) @pytest.mark.parametrize("rule", MATCH_NONE_SET_RULES) - def test_cross_validation_match_none_set_rules_passing( + def test_cross_validation_match_none_set_passing_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, series: Series, rule: str, ): @@ -443,7 +517,7 @@ def test_cross_validation_match_none_set_rules_passing( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): assert va_obj.cross_validation(rule, series) == ([], []) @@ -457,10 +531,10 @@ def test_cross_validation_match_none_set_rules_passing( ], ) @pytest.mark.parametrize("rule", MATCH_NONE_SET_RULES) - def test_cross_validation_match_none_set_rules_errors( + def test_cross_validation_match_none_set_errors_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, series: Series, rule: str, ): @@ -471,7 +545,7 @@ def test_cross_validation_match_none_set_rules_errors( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): errors, warnings = va_obj.cross_validation(rule, series) if rule.endswith("error"): @@ -482,6 +556,7 @@ def test_cross_validation_match_none_set_rules_errors( assert errors == [] @pytest.mark.parametrize("rule", MATCH_ATLEAST_ONE_VALUE_RULES) + @pytest.mark.parametrize("target_manifest", [TEST_DF1, TEST_DF3]) @pytest.mark.parametrize( "tested_column", [ @@ -493,12 +568,12 @@ def test_cross_validation_match_none_set_rules_errors( (["A", "B", "C", "C"]), ], ) - def test_cross_validation_value_match_atleast_one_rules_passing( + def test_cross_validation_match_atleast_one_value_passing_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, rule: str, tested_column: list, + target_manifest: DataFrame, ): """ Tests ValidateAttribute.cross_validation @@ -507,7 +582,7 @@ def test_cross_validation_value_match_atleast_one_rules_passing( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": target_manifest}, ): assert va_obj.cross_validation(rule, Series(tested_column)) == ([], []) @@ -522,10 +597,10 @@ def test_cross_validation_value_match_atleast_one_rules_passing( Series([1], index=[0], name="PatientID"), ], ) - def test_cross_validation_value_match_atleast_one_rules_errors( + def test_cross_validation_match_atleast_one_value_errors_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, rule: str, tested_column: Series, ): @@ -536,7 +611,7 @@ def test_cross_validation_value_match_atleast_one_rules_errors( with 
patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): errors, warnings = va_obj.cross_validation(rule, tested_column) if rule.endswith("error"): @@ -548,31 +623,38 @@ def test_cross_validation_value_match_atleast_one_rules_errors( @pytest.mark.parametrize("rule", MATCH_EXACTLY_ONE_VALUE_RULES) @pytest.mark.parametrize( - "tested_column", + "tested_column, target_manifest", [ - ([]), - (["A"]), - (["A", "A"]), - (["A", "B"]), - (["A", "B", "C"]), - (["A", "B", "C", "C"]), + ([], TEST_DF1), + ([], TEST_DF3), + (["C"], TEST_DF1), + (["C"], TEST_DF3), + (["C", "C"], TEST_DF1), + (["C", "C"], TEST_DF3), + (["A"], TEST_DF1), + (["A", "A"], TEST_DF1), + (["A", "B"], TEST_DF1), + (["A", "B", "C"], TEST_DF1), + (["A", "B", "C", "C"], TEST_DF1), ], ) - def test_cross_validation_match_exactly_one_value_rules_passing( + def test_cross_validation_match_exactly_one_value_passing_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, rule: str, tested_column: list, + target_manifest: DataFrame, ): """ Tests ValidateAttribute.cross_validation These tests show what columns pass for matchExactlyOne + The first group are ones that pass for TEST_DF1 and TEST_DF3 + The second group are those that pass only for test """ with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": target_manifest}, ): assert va_obj.cross_validation(rule, Series(tested_column)) == ([], []) @@ -586,10 +668,10 @@ def test_cross_validation_match_exactly_one_value_rules_passing( Series([1], index=[0], name="PatientID"), ], ) - def test_cross_validation_value_match_exactly_one_rules_errors( + def test_cross_validation_match_exactly_one_value_errors_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, rule: str, tested_column: Series, ): @@ -600,7 +682,7 @@ def test_cross_validation_value_match_exactly_one_rules_errors( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): errors, warnings = va_obj.cross_validation(rule, tested_column) if rule.endswith("error"): @@ -615,10 +697,10 @@ def test_cross_validation_value_match_exactly_one_rules_errors( "tested_column", [([]), (["D"]), (["D", "D"]), (["D", "F"]), ([1]), ([np.nan])], ) - def test_cross_validation_match_none_value_rules_passing( + def test_cross_validation_match_none_value_passing_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, rule: str, tested_column: list, ): @@ -629,7 +711,7 @@ def test_cross_validation_match_none_value_rules_passing( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): assert va_obj.cross_validation(rule, Series(tested_column)) == ([], []) @@ -642,10 +724,10 @@ def test_cross_validation_match_none_value_rules_passing( Series(["A", "A"], index=[0, 1], name="PatientID"), ], ) - def test_cross_validation_value_match_none_rules_errors( + def test_cross_validation_value_match_none_errors_one_df( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, rule: str, tested_column: Series, ): @@ -656,7 +738,7 @@ def 
test_cross_validation_value_match_none_rules_errors( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): errors, warnings = va_obj.cross_validation(rule, tested_column) if rule.endswith("error"): @@ -839,7 +921,7 @@ def test__run_validation_across_target_manifests_return_msg( @pytest.mark.parametrize("rule", ALL_VALUE_RULES) def test__run_validation_across_target_manifests_value_scope( - self, va_obj: ValidateAttribute, cross_val_df1: DataFrame, rule: str + self, va_obj: ValidateAttribute, test_df1: DataFrame, rule: str ) -> None: """Tests for ValidateAttribute._run_validation_across_target_manifests with value rule""" @@ -847,7 +929,7 @@ def test__run_validation_across_target_manifests_value_scope( with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): _, validation_output = va_obj._run_validation_across_target_manifests( rule_scope="value", @@ -882,7 +964,7 @@ def test__run_validation_across_target_manifests_value_scope( def test__run_validation_across_target_manifests_match_atleast_exactly_with_one_target( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, input_column: list, missing_ids: list[str], present_ids: list[str], @@ -895,12 +977,12 @@ def test__run_validation_across_target_manifests_match_atleast_exactly_with_one_ This shows that these rules behave the same. If all values in the column match the target manifest, the manifest id gets added to the present ids list. - Otherwise the maniferst id gets added to the missing ids list + Otherwise the manifest id gets added to the missing ids list """ with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): _, validation_output = va_obj._run_validation_across_target_manifests( rule_scope="set", @@ -927,7 +1009,7 @@ def test__run_validation_across_target_manifests_match_atleast_exactly_with_one_ def test__run_validation_across_target_manifests_match_atleast_exactly_with_two_targets( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, input_column: list, missing_ids: list[str], present_ids: list[str], @@ -938,13 +1020,13 @@ def test__run_validation_across_target_manifests_match_atleast_exactly_with_two_ Tests for ValidateAttribute._run_validation_across_target_manifests using matchAtleastOne set and matchExactlyOne rule. This shows these rules behave the same. 
- This also shows that when thare are multiple target mnaifests they both get added to + This also shows that when there are multiple target manifests they both get added to either the present of missing manifest ids """ with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1, "syn2": cross_val_df1}, + return_value={"syn1": test_df1, "syn2": test_df1}, ): _, validation_output = va_obj._run_validation_across_target_manifests( rule_scope="set", @@ -974,7 +1056,7 @@ def test__run_validation_across_target_manifests_match_atleast_exactly_with_two_ def test__run_validation_across_target_manifests_set_rules_match_none_with_one_target( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, input_column: list, missing_ids: list[str], present_ids: list[str], @@ -985,13 +1067,13 @@ def test__run_validation_across_target_manifests_set_rules_match_none_with_one_t Tests for ValidateAttribute._run_validation_across_target_manifests using matchNone set rule When there are nt matching values, no id get added - When there are mathcing values the id gets added to the repeat ids + When there are matching values the id gets added to the repeat ids """ with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1}, + return_value={"syn1": test_df1}, ): _, validation_output = va_obj._run_validation_across_target_manifests( rule_scope="set", @@ -1021,7 +1103,7 @@ def test__run_validation_across_target_manifests_set_rules_match_none_with_one_t def test__run_validation_across_target_manifests_set_rules_match_none_with_two_targets( self, va_obj: ValidateAttribute, - cross_val_df1: DataFrame, + test_df1: DataFrame, input_column: list, missing_ids: list[str], present_ids: list[str], @@ -1032,13 +1114,13 @@ def test__run_validation_across_target_manifests_set_rules_match_none_with_two_t Tests for ValidateAttribute._run_validation_across_target_manifests using matchNone set rule When there are nt matching values, no id get added - When there are mathcing values the id gets added to the repeat ids + When there are matching values the id gets added to the repeat ids """ with patch.object( schematic.models.validate_attribute.ValidateAttribute, "_get_target_manifest_dataframes", - return_value={"syn1": cross_val_df1, "syn2": cross_val_df1}, + return_value={"syn1": test_df1, "syn2": test_df1}, ): _, validation_output = va_obj._run_validation_across_target_manifests( rule_scope="set", @@ -1115,11 +1197,11 @@ def test__run_validation_across_targets_value( ("syn3", ["syn1"], ["syn1", "syn3"]), ], ) - def test__run_validation_across_targets_set_match_exactly_atleaset_one_no_missing_values( + def test__run_validation_across_targets_set_match_exactly_atleast_one_no_missing_values( self, va_obj: ValidateAttribute, - cross_val_col_names: dict[str, str], - cross_val_df1: DataFrame, + test_df_col_names: dict[str, str], + test_df1: DataFrame, rule: str, tested_column: list, target_id: str, @@ -1134,10 +1216,10 @@ def test__run_validation_across_targets_set_match_exactly_atleaset_one_no_missin """ output, bool_list1, bool_list2 = va_obj._run_validation_across_targets_set( val_rule=rule, - column_names=cross_val_col_names, + column_names=test_df_col_names, manifest_col=Series(tested_column), target_attribute="patientid", - target_manifest=cross_val_df1, + target_manifest=test_df1, target_manifest_id=target_id, missing_manifest_log={}, 
present_manifest_log=present_log_input.copy(), @@ -1162,11 +1244,11 @@ def test__run_validation_across_targets_set_match_exactly_atleaset_one_no_missin (["D", "F"], "syn3", [], []), ], ) - def test__run_validation_across_targets_set_match_exactly_atleaset_one_missing_values( + def test__run_validation_across_targets_set_match_exactly_atleast_one_missing_values( self, va_obj: ValidateAttribute, - cross_val_col_names: dict[str, str], - cross_val_df1: DataFrame, + test_df_col_names: dict[str, str], + test_df1: DataFrame, rule: str, tested_column: list, target_id: str, @@ -1179,10 +1261,10 @@ def test__run_validation_across_targets_set_match_exactly_atleaset_one_missing_v """ output, bool_list1, bool_list2 = va_obj._run_validation_across_targets_set( val_rule=rule, - column_names=cross_val_col_names, + column_names=test_df_col_names, manifest_col=Series(tested_column), target_attribute="patientid", - target_manifest=cross_val_df1, + target_manifest=test_df1, target_manifest_id=target_id, missing_manifest_log={}, present_manifest_log=present_log_input.copy(), @@ -1199,17 +1281,17 @@ def test__run_validation_across_targets_set_match_exactly_atleaset_one_missing_v def test__run_validation_across_targets_set_match_none( self, va_obj: ValidateAttribute, - cross_val_col_names: dict[str, str], - cross_val_df1: DataFrame, + test_df_col_names: dict[str, str], + test_df1: DataFrame, ) -> None: """Tests for ValidateAttribute._run_validation_across_targets_set for matchAtLeastOne""" output, bool_list1, bool_list2 = va_obj._run_validation_across_targets_set( val_rule="matchNone, Patient.PatientID, set", - column_names=cross_val_col_names, + column_names=test_df_col_names, manifest_col=Series(["A", "B", "C"]), target_attribute="patientid", - target_manifest=cross_val_df1, + target_manifest=test_df1, target_manifest_id="syn1", missing_manifest_log={}, present_manifest_log=[], @@ -1226,10 +1308,10 @@ def test__run_validation_across_targets_set_match_none( output, bool_list1, bool_list2 = va_obj._run_validation_across_targets_set( val_rule="matchNone, Patient.PatientID, set", - column_names=cross_val_col_names, + column_names=test_df_col_names, manifest_col=Series(["A"]), target_attribute="patientid", - target_manifest=cross_val_df1, + target_manifest=test_df1, target_manifest_id="syn2", missing_manifest_log={}, present_manifest_log=[], @@ -1270,7 +1352,7 @@ def test__gather_value_warnings_errors_passing( ) -> None: """ Tests for ValidateAttribute._gather_value_warnings_errors - For matchAtLeastOne to pass there must be no mssing values + For matchAtLeastOne to pass there must be no missing values For matchExactlyOne there must be no missing or duplicated values For matchNone there must be no repeat values """ @@ -1574,3 +1656,706 @@ def test__get_column_names( ) -> None: """Tests for ValidateAttribute._get_column_names""" assert va_obj._get_column_names(DataFrame(input_dict)) == expected_dict + + ############## + # get_no_entry + ############## + + @pytest.mark.parametrize( + "input_entry, node_name, expected", + [ + ("entry", "Check NA", False), + ("entry", "Check Date", False), + ("", "Check NA", False), + ("", "Check Date", True), + ], + ) + def test_get_no_entry( + self, + va_obj: ValidateAttribute, + input_entry: str, + node_name: str, + expected: bool, + ) -> None: + """ + This test shows that: + - if the entry is a normal string the result is always False(not no entry), + - if the entry is "" the result is False if the attribute has the "isNA" rule + - if the entry is "" the result is True if the 
attribute does not have the "isNA" rule + """ + assert va_obj.get_no_entry(input_entry, node_name) is expected + + ##################### + # get_entry_has_value + ##################### + + @pytest.mark.parametrize( + "input_entry, node_name, expected", + [ + ("entry", "Check NA", True), + ("entry", "Check Date", True), + ("", "Check NA", True), + ("", "Check Date", False), + ], + ) + def test_get_entry_has_value( + self, + va_obj: ValidateAttribute, + input_entry: str, + node_name: str, + expected: bool, + ) -> None: + """ + This test shows that: + - if the entry is a normal string the result is always True, + - if the entry is "" the result is True if the attribute has the "isNA" rule + - if the entry is "" the result is False if the attribute does not have the "isNA" rule + """ + assert va_obj.get_entry_has_value(input_entry, node_name) is expected + + ################# + # list_validation + ################# + + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series(["x,x,x"], name="Check List"), "list like"), + (Series(["x,x,x"], name="Check List"), "list strict"), + (Series([], name="Check List"), "list like"), + (Series([], name="Check List"), "list strict"), + (Series(["x"], name="Check List"), "list like"), + (Series(["xxx"], name="Check List"), "list like"), + (Series(["1"], name="Check List"), "list like"), + (Series([1], name="Check List"), "list like"), + (Series([1.1], name="Check List"), "list like"), + (Series([1, 1, 1], name="Check List"), "list like"), + (Series([np.nan], name="Check List"), "list like"), + (Series([True], name="Check List"), "list like"), + ], + ) + def test_list_validation_passing( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.list_validation + This test shows that: + - when using list like, just about anything is validated + - when using list strict, empty columns, and comma separated strings pass + + """ + errors, warnings, _ = va_obj.list_validation(rule, input_column) + assert len(errors) == 0 + assert len(warnings) == 0 + + @pytest.mark.parametrize( + "input_column", + [ + (Series(["x"], name="Check List")), + (Series(["xxxx"], name="Check List")), + (Series([1], name="Check List")), + (Series([1.1], name="Check List")), + (Series([1, 1, 1], name="Check List")), + (Series([np.nan], name="Check List")), + (Series([True], name="Check List")), + ], + ) + @pytest.mark.parametrize("rule", ["list strict", "list strict warning"]) + def test_list_validation_not_passing( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.list_validation + This test shows what doesn't pass when using list strict + """ + errors, warnings, _ = va_obj.list_validation(rule, input_column) + if rule.endswith("warning"): + assert len(errors) == 0 + assert len(warnings) > 0 + else: + assert len(errors) > 0 + assert len(warnings) == 0 + + ################## + # regex_validation + ################## + + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series(["a"], name="Check List"), "regex match [a-f]"), + (Series(["a,b,"], name="Check Regex List Strict"), "regex match [a-f]"), + ], + ) + def test_regex_validation_passing( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.regex_validation + This test shows passing examples using the match rule + """ + errors, warnings = va_obj.regex_validation(rule, input_column) + assert len(errors) == 0 + assert len(warnings) == 0 
+ + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series(["g"], name="Check List"), "regex match [a-f]"), + (Series(["a,b,c,g"], name="Check Regex List Strict"), "regex match [a-f]"), + ], + ) + def test_regex_validation_failing( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.regex_validation + This test shows failing examples using the match rule + """ + errors, warnings = va_obj.regex_validation(rule, input_column) + assert len(errors) == 1 + assert len(warnings) == 0 + + @pytest.mark.parametrize( + "input_column, rule, exception", + [ + (Series(["a"]), "", ValidationError), + (Series(["a"]), "regex", ValidationError), + (Series(["a"]), "regex match", ValidationError), + (Series(["a"]), "regex match [a-f]", ValueError), + ], + ) + def test_regex_validation_exceptions( + self, va_obj: ValidateAttribute, input_column: Series, rule: str, exception + ) -> None: + """ + This tests ValidateAttribute.regex_validation + This test shows that: + - when the rule is malformed, a ValidationError is raised + - when the input series has no name, a ValueError is raised + + """ + with pytest.raises(exception): + va_obj.regex_validation(rule, input_column) + + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series(["a,b,c"], name="Check Regex List"), "list::regex match [a-f]"), + ( + Series(["a,b,c", "d,e,f"], name="Check Regex List"), + "list::regex match [a-f]", + ), + ], + ) + def test_regex_validation_with_list_column( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.regex_validation using a list column + """ + errors, warnings = va_obj.regex_validation(rule, input_column) + assert len(errors) == 0 + assert len(warnings) == 0 + + ################# + # type_validation + ################# + + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series(["a"], name="Check String"), "str"), + (Series([1], name="Check Num"), "num"), + (Series([1], name="Check Int"), "int"), + (Series([1.1], name="Check Float"), "float"), + (Series([np.nan], name="Check String"), "str"), + (Series([np.nan], name="Check Num"), "num"), + (Series([np.nan], name="Check Int"), "int"), + (Series([np.nan], name="Check Float"), "float"), + ], + ) + def test_type_validation_passing( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.type_validation + This test shows passing examples using the type rule + """ + errors, warnings = va_obj.type_validation(rule, input_column) + assert len(errors) == 0 + assert len(warnings) == 0 + + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series([1], name="Check String"), "str"), + (Series([1], name="Check String"), "str error"), + (Series(["a"], name="Check Num"), "num"), + (Series(["a"], name="Check Num"), "num error"), + (Series(["20"], name="Check Num"), "num"), + (Series([1.1], name="Check Int"), "int"), + (Series(["a"], name="Check Int"), "int"), + (Series(["a"], name="Check Int"), "int error"), + (Series([1], name="Check Float"), "float"), + (Series(["a"], name="Check Float"), "float"), + (Series(["a"], name="Check Float"), "float error"), + ], + ) + def test_type_validation_errors( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.type_validation + This test shows failing examples using the type rule + """ + errors, warnings = va_obj.type_validation(rule, input_column) + assert len(errors) 
== 1 + assert len(warnings) == 0 + + @pytest.mark.parametrize( + "input_column, rule", + [ + (Series([1], name="Check String"), "str warning"), + (Series(["a"], name="Check Num"), "num warning"), + (Series(["a"], name="Check Int"), "int warning"), + (Series(["a"], name="Check Float"), "float warning"), + ], + ) + def test_type_validation_warnings( + self, va_obj: ValidateAttribute, input_column: Series, rule: str + ) -> None: + """ + This tests ValidateAttribute.type_validation + This test shows failing examples using the type rule + """ + errors, warnings = va_obj.type_validation(rule, input_column) + assert len(errors) == 0 + assert len(warnings) == 1 + + @pytest.mark.parametrize( + "input_column, rule, exception, msg", + [ + ( + Series([1], name="Check String"), + "", + ValueError, + "val_rule first component: must be one of", + ), + ( + Series([1], name="Check String"), + "x", + ValueError, + "val_rule first component: x must be one of", + ), + ( + Series([1], name="Check String"), + "x x x", + ValueError, + "val_rule must contain no more than two components.", + ), + ], + ) + def test_type_validation_exceptions( + self, + va_obj: ValidateAttribute, + input_column: Series, + rule: str, + exception: Exception, + msg: str, + ) -> None: + """ + This tests ValidateAttribute.type_validation + This test shows failing examples using the type rule + """ + with pytest.raises(exception, match=msg): + va_obj.type_validation(rule, input_column) + + ################ + # url_validation + ################ + + @pytest.mark.parametrize( + "input_column", + [ + (Series([], name="Check URL")), + (Series([np.nan], name="Check URL")), + ( + Series( + ["https://doi.org/10.1158/0008-5472.can-23-0128"], name="Check URL" + ) + ), + ], + ) + def test_url_validation_passing( + self, + va_obj: ValidateAttribute, + input_column: Series, + ) -> None: + """ + This tests ValidateAttribute.url_validation + This test shows passing examples using the url rule + """ + errors, warnings = va_obj.url_validation("url", input_column) + assert len(errors) == 0 + assert len(warnings) == 0 + + @pytest.mark.parametrize( + "input_column", + [(Series([""], name="Check URL")), (Series(["xxx"], name="Check URL"))], + ) + def test_url_validation_failing( + self, + va_obj: ValidateAttribute, + input_column: Series, + ) -> None: + """ + This tests ValidateAttribute.url_validation + This test shows failing examples using the url rule + """ + errors, warnings = va_obj.url_validation("url", input_column) + assert len(errors) > 0 + assert len(warnings) == 0 + + ####################### + # _parse_validation_log + ####################### + + @pytest.mark.parametrize( + "input_log, expected_invalid_rows, expected_invalid_entities, expected_manifest_ids", + [ + ({}, [], [], []), + ({"syn1": Series(["A"])}, ["2"], ["A"], ["syn1"]), + ({"syn1": Series(["A"], index=[1])}, ["3"], ["A"], ["syn1"]), + ({"syn1": Series(["A", "B"])}, ["2"], ["A"], ["syn1"]), + ( + {"syn1": Series(["A"]), "syn2": Series(["B"])}, + ["2"], + ["A", "B"], + ["syn1", "syn2"], + ), + ], + ) + def test__parse_validation_log( + self, + va_obj: ValidateAttribute, + input_log: dict[str, Series], + expected_invalid_rows: list[str], + expected_invalid_entities: list[str], + expected_manifest_ids: list[str], + ) -> None: + """ + This test shows that + - an empty log returns empty values + - only the first value in each series is returned as invalid entities + - the index of the invalid entity is returned incremented by 2 + - each manifest entity is returned + + """ + invalid_rows, 
+            input_log
+        )
+        assert invalid_rows == expected_invalid_rows
+        assert sorted(invalid_entities) == expected_invalid_entities
+        assert manifest_ids == expected_manifest_ids
+
+    ###################################
+    # _merge_format_invalid_rows_values
+    ###################################
+
+    @pytest.mark.parametrize(
+        "input_series1, input_series2, expected_invalid_rows, expected_invalid_entry",
+        [
+            (Series([], name="x"), Series([], name="x"), [], []),
+            (Series(["A"], name="x"), Series([], name="x"), ["2"], ["A"]),
+            (Series([], name="x"), Series(["B"], name="x"), ["2"], ["B"]),
+            (Series(["A"], name="x"), Series(["B"], name="x"), ["2"], ["A", "B"]),
+            (Series(["A"], name="x"), Series(["C"], name="x"), ["2"], ["A", "C"]),
+            (
+                Series(["A", "B"], name="x"),
+                Series(["C"], name="x"),
+                ["2", "3"],
+                ["A", "B", "C"],
+            ),
+        ],
+    )
+    def test__merge_format_invalid_rows_values(
+        self,
+        va_obj: ValidateAttribute,
+        input_series1: Series,
+        input_series2: Series,
+        expected_invalid_rows: list[str],
+        expected_invalid_entry: list[str],
+    ) -> None:
+        """
+        This test shows that
+        - the names of the series must match
+        - the indices of both series get combined and incremented by 2
+        - the values of both series are combined
+        """
+        invalid_rows, invalid_entry = va_obj._merge_format_invalid_rows_values(
+            input_series1, input_series2
+        )
+        assert invalid_rows == expected_invalid_rows
+        assert invalid_entry == expected_invalid_entry
+
+    ############################
+    # _format_invalid_row_values
+    ############################
+
+    @pytest.mark.parametrize(
+        "input_series, expected_invalid_rows, expected_invalid_entry",
+        [
+            (Series([]), [], []),
+            (Series(["A"]), ["2"], ["A"]),
+            (Series(["A", "B"]), ["2", "3"], ["A", "B"]),
+        ],
+    )
+    def test__format_invalid_row_values(
+        self,
+        va_obj: ValidateAttribute,
+        input_series: Series,
+        expected_invalid_rows: list[str],
+        expected_invalid_entry: list[str],
+    ) -> None:
+        """
+        This test shows that the indices of the input series are incremented by 2
+        """
+        invalid_rows, invalid_entry = va_obj._format_invalid_row_values(input_series)
+        assert invalid_rows == expected_invalid_rows
+        assert invalid_entry == expected_invalid_entry
+
+    ###########################################
+    # _remove_non_entry_from_invalid_entry_list
+    ###########################################
+
+    @pytest.mark.parametrize(
+        "input_entry, input_row_num, input_name, expected_invalid_entry, expected_row_num",
+        [
+            # Cases where entry and row number remain unchanged
+            ([], [], "", [], []),
+            (None, None, "", None, None),
+            (["A"], None, "", ["A"], None),
+            (None, ["1"], "", None, ["1"]),
+            (["A"], ["1"], "x", ["A"], ["1"]),
+            (["A", "B"], ["1"], "x", ["A", "B"], ["1"]),
+            (["A"], ["1", "2"], "x", ["A"], ["1", "2"]),
+            # When there are missing values, the value and the row number are removed
+            ([""], ["1"], "x", [], []),
+            (["", ""], ["1", "2"], "x", [], []),
+            (["", "A"], ["1", "2"], "x", ["A"], ["2"]),
+            # When there are more row numbers than values, and there are missing values,
+            # then the row number that corresponds to the missing value is removed
+            ([""], ["1", "2"], "x", [], ["2"]),
+            (["", ""], ["1", "2", "3", "4"], "x", [], ["3", "4"]),
+            (["", "A"], ["1", "2", "3", "4"], "x", ["A"], ["2", "3", "4"]),
+            (["A", ""], ["1", "2", "3", "4"], "x", ["A"], ["1", "3", "4"]),
+        ],
+    )
+    def test__remove_non_entry_from_invalid_entry_list(
+        self,
+        va_obj: ValidateAttribute,
+        input_entry: list[str],
+        input_row_num: list[str],
+        input_name: str,
+        expected_invalid_entry: list[str],
+        expected_row_num: list[str],
+    ) -> None:
+        """
+        Tests for ValidateAttribute._remove_non_entry_from_invalid_entry_list
+        """
+        invalid_entry, row_num = va_obj._remove_non_entry_from_invalid_entry_list(
+            input_entry, input_row_num, input_name
+        )
+        assert invalid_entry == expected_invalid_entry
+        assert row_num == expected_row_num
+
+    @pytest.mark.parametrize(
+        "input_entry, input_row_num, input_name, exception",
+        [
+            # If the first two inputs are not empty, an empty name string causes an IndexError
+            (["A"], ["1"], "", IndexError),
+            # If there are more invalid entries than row numbers, there is an IndexError
+            (["", ""], ["1"], "x", IndexError),
+        ],
+    )
+    def test__remove_non_entry_from_invalid_entry_list_exceptions(
+        self,
+        va_obj: ValidateAttribute,
+        input_entry: list[str],
+        input_row_num: list[str],
+        input_name: str,
+        exception: Exception,
+    ) -> None:
+        """
+        Tests for ValidateAttribute._remove_non_entry_from_invalid_entry_list that cause
+        exceptions
+        """
+        with pytest.raises(exception):
+            va_obj._remove_non_entry_from_invalid_entry_list(
+                input_entry, input_row_num, input_name
+            )
+
+    ####################################
+    # _check_if_target_manifest_is_empty
+    ####################################
+
+    @pytest.mark.parametrize(
+        "input_dataframe, input_bool_list, input_column_dict, output_bool_list",
+        [
+            # Dataframes with only required columns are always considered empty
+            (
+                DataFrame({"component": [], "id": [], "entityid": []}),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                [True],
+            ),
+            (
+                DataFrame({"component": ["xxx"], "id": ["xxx"], "entityid": ["xxx"]}),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                [True],
+            ),
+            # Dataframes with non-required columns whose only values are null are considered empty
+            (
+                DataFrame(
+                    {
+                        "component": ["xxx"],
+                        "id": ["xxx"],
+                        "entityid": ["xxx"],
+                        "col1": [np.nan],
+                    }
+                ),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                [True],
+            ),
+            (
+                DataFrame(
+                    {
+                        "component": ["xxx"],
+                        "id": ["xxx"],
+                        "entityid": ["xxx"],
+                        "col1": [np.nan],
+                        "col2": [np.nan],
+                    }
+                ),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                [True],
+            ),
+            # Dataframes with non-required columns that have non-null values are not considered empty
+            (
+                DataFrame(
+                    {
+                        "component": ["xxx"],
+                        "id": ["xxx"],
+                        "entityid": ["xxx"],
+                        "col1": ["xxx"],
+                    }
+                ),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                [False],
+            ),
+            (
+                DataFrame(
+                    {
+                        "component": ["xxx"],
+                        "id": ["xxx"],
+                        "entityid": ["xxx"],
+                        "col1": [np.nan],
+                        "col2": ["xxx"],
+                    }
+                ),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                [False],
+            ),
+        ],
+    )
+    def test__check_if_target_manifest_is_empty(
+        self,
+        va_obj: ValidateAttribute,
+        input_dataframe: DataFrame,
+        input_bool_list: list[bool],
+        input_column_dict: dict[str, str],
+        output_bool_list: list[bool],
+    ) -> None:
+        """
+        Tests for ValidateAttribute._check_if_target_manifest_is_empty
+        """
+        bool_list = va_obj._check_if_target_manifest_is_empty(
+            input_dataframe, input_bool_list, input_column_dict
+        )
+        assert bool_list == output_bool_list
+
+    @pytest.mark.parametrize(
+        "input_dataframe, input_bool_list, input_column_dict, exception",
+        [
+            # The column name dict must have keys "component", "id", "entityid"
+            (DataFrame({"component": [], "id": [], "entityid": []}), [], {}, KeyError),
+            # The dataframe must have columns "component", "id", "entityid"
+            (
+                DataFrame(),
+                [],
+                {"component": "component", "id": "id", "entityid": "entityid"},
+                KeyError,
+            ),
+        ],
+    )
+    def test__check_if_target_manifest_is_empty_exceptions(
+        self,
+        va_obj: ValidateAttribute,
+        input_dataframe: DataFrame,
+        input_bool_list: list[bool],
+        input_column_dict: dict[str, str],
+        exception: Exception,
+    ) -> None:
+        """
+        Tests for ValidateAttribute._check_if_target_manifest_is_empty that cause
+        exceptions
+        """
+        with pytest.raises(exception):
+            va_obj._check_if_target_manifest_is_empty(
+                input_dataframe, input_bool_list, input_column_dict
+            )
+
+    #################
+    # _get_rule_scope
+    #################
+
+    @pytest.mark.parametrize(
+        "input_rule, output_scope",
+        [
+            # After splitting by spaces, the third element is returned
+            ("a b c", "c"),
+            ("a b c d", "c"),
+        ],
+    )
+    def test__get_rule_scope(
+        self, va_obj: ValidateAttribute, input_rule: str, output_scope: str
+    ) -> None:
+        """
+        Tests for ValidateAttribute._get_rule_scope
+        """
+        assert va_obj._get_rule_scope(input_rule) == output_scope
+
+    @pytest.mark.parametrize(
+        "input_rule, exception",
+        [
+            # The rule must be a string that, when split by spaces, has at least three elements
+            ("", IndexError),
+            ("x", IndexError),
+            ("x x", IndexError),
+            ("x;x;x", IndexError),
+        ],
+    )
+    def test__get_rule_scope_exceptions(
+        self, va_obj: ValidateAttribute, input_rule: str, exception: Exception
+    ) -> None:
+        """
+        Tests for ValidateAttribute._get_rule_scope that cause exceptions
+        """
+        with pytest.raises(exception):
+            va_obj._get_rule_scope(input_rule)