diff --git a/.env.sample b/.env.sample
index 9a186f4f8..a437bc951 100644
--- a/.env.sample
+++ b/.env.sample
@@ -17,4 +17,5 @@ GITHUB_SECRET=""
 OAUTH_PROVIDERS="github"
 SECRET_KEY=""
 NAMESPACE="default"
-stdnfsPvc="stdnfs"
\ No newline at end of file
+stdnfsPvc="stdnfs"
+CSRF_DOMAINS="https://*.renci.org"
diff --git a/.github/workflows/build-push-dev-image.yml b/.github/workflows/build-push-dev-image.yml
index 64d182e6d..1ac37f685 100644
--- a/.github/workflows/build-push-dev-image.yml
+++ b/.github/workflows/build-push-dev-image.yml
@@ -1,10 +1,9 @@
 # Workflow responsible for the
 # development release processes.
-#
 name: Build-Push-Dev-Image
 on:
-  push:
+  push:
     branches:
       - develop
     paths-ignore:
@@ -16,90 +15,12 @@ on:
       - .gitignore
       - .dockerignore
       - .githooks
-  # Do not build another image on a pull request.
-  # Any push to develop will trigger a new build however.
-  pull_request:
-    branches-ignore:
-      - '*'
-
+  # Do not build another image on a pull request.
+  # Any push to develop will trigger a new build however.
+  pull_request:
+    branches-ignore:
+      - '*'
 jobs:
-  build-push-dev-image:
-    runs-on: ubuntu-latest
-    steps:
-
-      - name: Checkout Code
-        uses: actions/checkout@v3
-        with:
-          ref: ${{ github.head_ref }}
-          # fetch-depth: 0 means, get all branches and commits
-          fetch-depth: 0
-
-      - name: Set short git commit SHA
-        id: vars
-        run: |
-          echo "short_sha=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_OUTPUT
-        # https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/
-
-      - name: Confirm git commit SHA output
-        run: echo ${{ steps.vars.outputs.short_sha }}
-
-      # https://github.com/marketplace/actions/git-semantic-version
-      # - name: Semver Check
-      #   uses: paulhatch/semantic-version@v5.0.3
-      #   id: version
-      #   with:
-      #     # The prefix to use to identify tags
-      #     tag_prefix: "v"
-      #     # A string which, if present in a git commit, indicates that a change represents a
-      #     # major (breaking) change, supports regular expressions wrapped with '/'
-      #     major_pattern: "/(breaking)|(major)/"
-      #     # A string which indicates the flags used by the `major_pattern` regular expression. Supported flags: idgs
-      #     major_regexp_flags: "ig"
-      #     # Same as above except indicating a minor change, supports regular expressions wrapped with '/'
-      #     minor_pattern: "/(feat)|(feature)|(minor)/"
-      #     # A string which indicates the flags used by the `minor_pattern` regular expression. Supported flags: idgs
-      #     minor_regexp_flags: "ig"
-      #     # A string to determine the format of the version output
-      #     # version_format: "${major}.${minor}.${patch}-prerelease${increment}"
-      #     version_format: "${major}.${minor}.${patch}-prerelease${increment}"
-      #     search_commit_body: false
-
-      # Docker Buildx is important to caching in the Build And Push Container
-      # step
-      # https://github.com/marketplace/actions/build-and-push-docker-images
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          logout: true
-
-      - name: Login to Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: containers.renci.org
-          username: ${{ secrets.CONTAINERHUB_USERNAME }}
-          password: ${{ secrets.CONTAINERHUB_TOKEN }}
-          logout: true
-
-      # Notes on Cache:
-      # https://docs.docker.com/build/ci/github-actions/examples/#inline-cache
-      - name: Build Push Container
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          push: true
-          # Push to renci-registry and dockerhub here.
-          # cache comes from dockerhub.
-          tags: |
-            ${{ github.repository }}:develop
-            ${{ github.repository }}:${{ steps.vars.outputs.short_sha }}
-            containers.renci.org/${{ github.repository }}:develop
-            containers.renci.org/${{ github.repository }}:${{ steps.vars.outputs.short_sha }}
-          cache-from: type=registry,ref=${{ github.repository }}:buildcache-dev
-          cache-to: type=registry,ref=${{ github.repository }}:buildcache-dev,mode=max
+  build-push-dev-image:
+    uses: helxplatform/helx-github-actions/.github/workflows/build-push-dev-image.yml@main
+    secrets: inherit
diff --git a/.github/workflows/build-push-release.yml b/.github/workflows/build-push-release.yml
index 71fbc48f5..3620d7e4c 100644
--- a/.github/workflows/build-push-release.yml
+++ b/.github/workflows/build-push-release.yml
@@ -19,108 +19,8 @@ on:
       - .githooks
     tags-ignore:
       - 'v[0-9]+.[0-9]+.*'
+      - '*'
 jobs:
   build-push-release:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout Code
-        uses: actions/checkout@v3
-        with:
-          ref: ${{ github.head_ref }}
-          fetch-depth: 0
-
-      - name: Set short git commit SHA
-        id: vars
-        run: |
-          echo "short_sha=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_OUTPUT
-        # https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/
-
-      - name: Confirm git commit SHA output
-        run: echo ${{ steps.vars.outputs.short_sha }}
-
-      # https://github.com/marketplace/actions/git-semantic-version
-      - name: Semver Check
-        uses: paulhatch/semantic-version@v5.0.3
-        id: version
-        with:
-          # The prefix to use to identify tags
-          tag_prefix: "v"
-          # A string which, if present in a git commit, indicates that a change represents a
-          # major (breaking) change, supports regular expressions wrapped with '/'
-          major_pattern: "/breaking|major/"
-          # A string which indicates the flags used by the `major_pattern` regular expression. Supported flags: idgs
-          major_regexp_flags: "ig"
-          # Same as above except indicating a minor change, supports regular expressions wrapped with '/'
-          minor_pattern: "/feat|feature|minor/"
-          # A string which indicates the flags used by the `minor_pattern` regular expression. Supported flags: idgs
-          minor_regexp_flags: "ig"
-          # A string to determine the format of the version output
-          # version_format: "${major}.${minor}.${patch}-prerelease${increment}"
-          version_format: "${major}.${minor}.${patch}"
-          search_commit_body: false
-
-      # Docker Buildx is important to caching in the Build And Push Container
-      # step
-      # https://github.com/marketplace/actions/build-and-push-docker-images
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          logout: true
-
-      - name: Login to Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: containers.renci.org
-          username: ${{ secrets.CONTAINERHUB_USERNAME }}
-          password: ${{ secrets.CONTAINERHUB_TOKEN }}
-          logout: true
-
-      # Notes on Cache:
-      # https://docs.docker.com/build/ci/github-actions/examples/#inline-cache
-      - name: Build Push Container
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          # Push to renci-registry and dockerhub here.
-          # cache comes from dockerhub.
-          tags: |
-            containers.renci.org/${{ github.repository }}:v${{ steps.version.outputs.version }}
-            containers.renci.org/${{ github.repository }}:latest
-            containers.renci.org/${{ github.repository }}:${{ steps.vars.outputs.short_sha }}
-            ${{ github.repository }}:v${{ steps.version.outputs.version }}
-            ${{ github.repository }}:latest
-            ${{ github.repository }}:${{ steps.vars.outputs.short_sha }}
-          cache-from: type=registry,ref=${{ github.repository }}:buildcache-release
-          cache-to: type=registry,ref=${{ github.repository }}:buildcache-release,mode=max
-
-#==========================TAG & RELEASE W/ NOTES =========================
-
-      # Note: GITHUB_TOKEN is autogenerated feature of github app
-      # which is auto-enabled when using github actions.
-      # https://docs.github.com/en/actions/security-guides/automatic-token-authentication
-      # https://docs.github.com/en/rest/git/tags?apiVersion=2022-11-28#create-a-tag-object
-      # https://docs.github.com/en/rest/git/refs?apiVersion=2022-11-28#create-a-reference
-      # This creates a "lightweight" ref tag.
-      - name: Create Tag for Release
-        run: |
-          curl \
-            -s --fail -X POST \
-            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-            https://api.github.com/repos/${{ github.repository }}/git/refs \
-            -d '{"ref":"refs/tags/v${{ steps.version.outputs.version }}","sha":"${{ github.sha }}"}'
-
-      # https://cli.github.com/manual/gh_release_create
-      - name: Create Release
-        env:
-          RELEASE_VERSION: ${{ steps.version.outputs.version }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          gh release create ${{ env.RELEASE_VERSION }} \
-            -t "${{ env.RELEASE_VERSION }}" \
-            --generate-notes \
-            --latest
+    uses: helxplatform/helx-github-actions/.github/workflows/build-push-release.yml@main
+    secrets: inherit
\ No newline at end of file
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index ff128a9c7..2be162ac2 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -11,14 +11,14 @@
 #
 #
-name: Code-Checks
+name: Code-Checks-Remote
 on:
-  push:
-    branches-ignore:
+  push:
+    branches-ignore:
     - master
     - main
     - develop
-    paths-ignore:
+    paths-ignore:
    - README.md
    - .old_cicd/*
    - .github/*
@@ -27,90 +27,15 @@ on:
    - .gitignore
    - .dockerignore
    - .githooks
-  pull_request:
-    branches:
+  pull_request:
+    branches:
    - develop
    - master
    - main
-    types: [ opened, synchronize ]
-
-
+    types: [ opened, synchronize ]
+
 jobs:
-############################## flake8-linter ##############################
-  flake8-linter:
-    runs-on: ubuntu-latest
-    # if: ${{ github.actor == 'dependabot[bot]' }}
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.9'
-
-      # Currently actions/setup-python supports caching
-      # but the cache is not as robust as cache action.
-      # Here we cache the entire python env which speeds subsequent builds up alot. (alot being scientific term)
-      # Ref: https://blog.allenai.org/python-caching-in-github-actions-e9452698e98d
-      - uses: actions/cache@v3
-        name: Cache Python
-        with:
-          path: ${{ env.pythonLocation }}
-          key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('requirements.txt') }}
-
-      - name: Install Requirements
-        run: |
-          pip install -r requirements.txt
-
-      - name: Lint with flake8
-        run: |
-          pip install flake8
-          flake8 --ignore=E,W .
-        # We continue on error here until the code is clean
-        # flake8 --ignore=E,W --exit-zero .
-        continue-on-error: true
-
-############################## test-image-build ##############################
-  test-image-build:
-    runs-on: ubuntu-latest
-    # if: ${{ github.actor == 'dependabot[bot]' }}
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set short git commit SHA
-        id: vars
-        run: |
-          echo "short_sha=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_OUTPUT
-        # https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/
-
-      - name: Confirm git commit SHA output
-        run: echo ${{ steps.vars.outputs.short_sha }}
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          logout: true
-
-      - name: Parse Github Reference Name
-        id: branch
-        run: |
-          REF=${{ github.ref_name }}
-          echo "GHR=${REF%/*}" >> $GITHUB_OUTPUT
-
-      # Notes on Cache:
-      # https://docs.docker.com/build/ci/github-actions/examples/#inline-cache
-      - name: Build Container
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          push: true
-          tags: |
-            ${{ github.repository }}:test_${{ steps.branch.outputs.GHR }}
-          cache-from: type=registry,ref=${{ github.repository }}:buildcache
-          cache-to: type=registry,ref=${{ github.repository }}:buildcache,mode=max
+
+  code-checks:
+    uses: helxplatform/helx-github-actions/.github/workflows/code-checks.yml@main
+    secrets: inherit
\ No newline at end of file
diff --git a/.github/workflows/trivy-pr-scan.yml b/.github/workflows/trivy-pr-scan.yml
index 142572da2..0ff0bbeaa 100644
--- a/.github/workflows/trivy-pr-scan.yml
+++ b/.github/workflows/trivy-pr-scan.yml
@@ -18,50 +18,6 @@ on:
       - .githooks

 jobs:
-  trivy-pr-scan:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          driver-opts: |
-            network=host
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          logout: true
-
-      # Notes on Cache:
-      # https://docs.docker.com/build/ci/github-actions/examples/#inline-cache
-      - name: Build Container
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          push: false
-          load: true
-          tags: ${{ github.repository }}:vuln-test
-          cache-from: type=registry,ref=${{ github.repository }}:buildcache
-          cache-to: type=registry,ref=${{ github.repository }}:buildcache,mode=max
-
-      # We will not be concerned with Medium and Low vulnerabilities
-      - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@master
-        with:
-          image-ref: '${{ github.repository }}:vuln-test'
-          format: 'sarif'
-          severity: 'CRITICAL,HIGH'
-          output: 'trivy-results.sarif'
-          exit-code: '1'
-      # Scan results should be viewable in GitHub Security Dashboard
-      # We still fail the job if results are found, so below will always run
-      # unless manually canceled.
-      - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@v2
-        if: '!cancelled()'
-        with:
-          sarif_file: 'trivy-results.sarif'
\ No newline at end of file
+  trivy-pr-scan:
+    uses: helxplatform/helx-github-actions/.github/workflows/trivy-pr-scan.yml@main
+    secrets: inherit
diff --git a/.gitignore b/.gitignore
index 300413090..a0e09b55f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -148,7 +148,6 @@ venv
 *.DS_Store

 # Appstore optional dev dependencies
-tycho
 *.sqlite*

 # React frontend template
diff --git a/.old_cicd/Jenkinsfile b/.old_cicd/Jenkinsfile
deleted file mode 100644
index 358fbaa8b..000000000
--- a/.old_cicd/Jenkinsfile
+++ /dev/null
@@ -1,104 +0,0 @@
-library 'pipeline-utils@master'
-
-CCV = ""
-
-pipeline {
-    agent {
-        kubernetes {
-            yaml """
-kind: Pod
-metadata:
-  name: kaniko
-spec:
-  containers:
-  - name: jnlp
-    workingDir: /home/jenkins/agent/
-  - name: kaniko
-    workingDir: /home/jenkins/agent/
-    image: gcr.io/kaniko-project/executor:debug
-    imagePullPolicy: Always
-    resources:
-      requests:
-        cpu: "512m"
-        memory: "1024Mi"
-        ephemeral-storage: "2816Mi"
-      limits:
-        cpu: "1024m"
-        memory: "2048Mi"
-        ephemeral-storage: "3Gi"
-    command:
-    - /busybox/cat
-    tty: true
-    volumeMounts:
-      - name: jenkins-docker-cfg
-        mountPath: /kaniko/.docker
-  - name: go
-    workingDir: /home/jenkins/agent/
-    image: golang:1.19.1
-    imagePullPolicy: Always
-    resources:
-      requests:
-        cpu: "512m"
-        memory: "512Mi"
-        ephemeral-storage: "1Gi"
-      limits:
-        cpu: "512m"
-        memory: "1024Mi"
-        ephemeral-storage: "1Gi"
-    command:
-    - /bin/bash
-    tty: true
-  volumes:
-  - name: jenkins-docker-cfg
-    secret:
-      secretName: rencibuild-imagepull-secret
-      items:
-      - key: .dockerconfigjson
-        path: config.json
-"""
-        }
-    }
-    environment {
-        PATH = "/busybox:/kaniko:/ko-app/:$PATH"
-        DOCKERHUB_CREDS = credentials("${env.CONTAINERS_REGISTRY_CREDS_ID_STR}")
-        GITHUB_CREDS = credentials("${env.GITHUB_CREDS_ID_STR}")
-        REGISTRY = "${env.REGISTRY}"
-        REG_OWNER="helxplatform"
-        REPO_NAME="appstore"
-        COMMIT_HASH="${sh(script:"git rev-parse --short HEAD", returnStdout: true).trim()}"
-        IMAGE_NAME="${REGISTRY}/${REG_OWNER}/${REPO_NAME}"
-    }
-
-    stages {
-        stage('Build') {
-            steps {
-                script {
-                    container(name: 'go', shell: '/bin/bash') {
-                        if (BRANCH_NAME.equals("master")) {
-                            CCV = go.ccv()
-                        }
-                    }
-                    container(name: 'kaniko', shell: '/busybox/sh') {
-                        def tagsToPush = ["$IMAGE_NAME:$BRANCH_NAME", "$IMAGE_NAME:$COMMIT_HASH"]
-                        if (CCV != null && !CCV.trim().isEmpty() && BRANCH_NAME.equals("master")) {
-                            tagsToPush.add("$IMAGE_NAME:$CCV")
-                            tagsToPush.add("$IMAGE_NAME:latest")
-                        } else if (BRANCH_NAME.equals("develop")) {
-                            def now = new Date()
-                            def currTimestamp = now.format("yyyy-MM-dd'T'HH.mm'Z'", TimeZone.getTimeZone('UTC'))
-                            tagsToPush.add("$IMAGE_NAME:$currTimestamp")
-                        }
-                        kaniko.buildAndPush("./Dockerfile", tagsToPush)
-                    }
-                }
-            }
-        }
-        stage('Test') {
-            steps {
-                sh '''
-                echo "Test stage"
-                '''
-            }
-        }
-    }
-}
diff --git a/Dockerfile b/Dockerfile
index b414e3c42..ff85daaa0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,11 +16,13 @@ RUN adduser --disabled-login --home $HOME --shell /bin/bash --uid $UID $USER &&

 RUN set -x && apt-get update && \
     chown -R $UID:$UID $APP_HOME && \
-    apt-get install -y build-essential git xmlsec1 curl
+    apt-get install -y build-essential git xmlsec1 libpq5 gcc

-# Specifically install node v14.x, since otherwise apt-get will install a much more outdated version.
-RUN curl -sL https://deb.nodesource.com/setup_14.x | bash
-RUN apt-get install -y nodejs
+# Removing but leaving commented in case Tycho needs this for swagger.
+# Version 3.3.1 currently; if no complaints by v3.3.3 this can be
+# completely removed.
+# RUN curl -sL https://deb.nodesource.com/setup_14.x | bash
+# RUN apt-get install -y nodejs

 WORKDIR $APP_HOME
 COPY . .
diff --git a/Makefile b/Makefile
index cb1966552..4174a4289 100644
--- a/Makefile
+++ b/Makefile
@@ -26,15 +26,6 @@ BRANDS := braini bdc heal restartr scidas eduhelx argus tracs eduhelx-s
 MANAGE := ${PYTHON} appstore/manage.py
 SETTINGS_MODULE := ${DJANGO_SETTINGS_MODULE}

-# smoke|test
-ARTILLERY_ENV := ${ARTILLERY_ENVIRONMENT}
-# URL pointing to appstore base path, e.g. http://localhost:8000
-ARTILLERY_TARGET := ${ARTILLERY_TARGET}
-# Duration in seconds that an Artillery load test lasts.
-ARTILLERY_DURATION := ${ARTILLERY_DURATION}
-# Amount of users to instantiate per second during an Artillery load test.
-ARTILLERY_ARRIVAL_RATE := ${ARTILLERY_ARRIVAL_RATE}
-
 ifdef GUNICORN_WORKERS
 NO_OF_GUNICORN_WORKERS := $(GUNICORN_WORKERS)
 else
@@ -102,31 +93,6 @@ install:
 test:
	$(foreach brand,$(BRANDS),SECRET_KEY=${SECRET_KEY} DEV_PHASE=stub DJANGO_SETTINGS_MODULE=appstore.settings.$(brand)_settings ${MANAGE} test $(APP_LIST);)

-#install.artillery: Install required packages for artillery testing
-install.artillery:
-	cd artillery-tests; \
-	npm install
-
-#test.artillery: Run artillery testing
-test.artillery:
-ifndef ARTILLERY_ENV
-	$(error ARTILLERY_ENVIRONMENT not set (smoke|load))
-endif
-ifndef ARTILLERY_TARGET
-	$(error ARTILLERY_TARGET not set (should point to the base URL of appstore, e.g. "http://localhost:8000"))
-endif
-ifeq "${ARTILLERY_ENV}" "load"
-ifndef ARTILLERY_DURATION
-	$(error ARTILLERY_DURATION not set when ARTILLERY_ENVIRONMENT=load (seconds that a load test lasts))
-endif
-ifndef ARTILLERY_ARRIVAL_RATE
-	$(error ARTILLERY_ARRIVAL_RATE not set when ARTILLERY_ENVIRONMENT=load (users instantiated per second))
-endif
-endif
-	cd artillery-tests; \
-	ls -1 tests | xargs -L1 -I%TEST_NAME% npx artillery run tests/%TEST_NAME% --environment ${ARTILLERY_ENV} --target ${ARTILLERY_TARGET}
-
-
 #start: Run the gunicorn server
 start: build.postgresql.local
	if [ -z ${DJANGO_SETTINGS_MODULE} ]; then make help && echo "\n\nPlease set the DJANGO_SETTINGS_MODULE environment variable\n\n"; exit 1; fi
diff --git a/README.md b/README.md
index d7df2ca1f..5e571791f 100644
--- a/README.md
+++ b/README.md
@@ -515,7 +515,7 @@ appstore:
     tag:
     pullPolicy: Always
   django:
-    AUTHORIZED_USERS:
+    AUTHORIZED_USERS:
     EMAIL_HOST_USER: "appstore@renci.org"
     EMAIL_HOST_PASSWORD:
     DOCKSTORE_APPS_BRANCH:
diff --git a/appstore/api/urls.py b/appstore/api/urls.py
index aceac12bb..1f51eb754 100644
--- a/appstore/api/urls.py
+++ b/appstore/api/urls.py
@@ -1,5 +1,4 @@
-from django.conf.urls import re_path
-from django.urls import include
+from django.urls import include, re_path

 from .v1.router import v1_urlpatterns
diff --git a/appstore/api/v1/views.py b/appstore/api/v1/views.py
index 332e889fe..536548bbb 100644
--- a/appstore/api/v1/views.py
+++ b/appstore/api/v1/views.py
@@ -684,10 +684,11 @@ def _get_social_providers(self, request, settings):
             "allauth.account.auth_backends.AuthenticationBackend"
             in settings.AUTHENTICATION_BACKENDS
         ):
-            for provider in socialaccount.providers.registry.get_list():
+            for provider in socialaccount.providers.registry.get_class_list():
+                inst = provider(request, "allauth.socialaccount")
                 provider_data.append(
                     asdict(
-                        LoginProvider(provider.name, provider.get_login_url(request))
+                        LoginProvider(inst.name, inst.get_login_url(request))
                     )
                 )

diff --git a/appstore/appstore/settings/base.py b/appstore/appstore/settings/base.py
index a733bdce4..53d4e4d34 100644
--- a/appstore/appstore/settings/base.py
+++ b/appstore/appstore/settings/base.py
@@ -41,6 +41,7 @@
 if DEBUG_STRING.lower() == "false":
     DEBUG_STRING = ""
 DEBUG = bool(DEBUG_STRING)
+
 # stub, local, dev, val, prod.
 DEV_PHASE = os.environ.get("DEV_PHASE", "local")
 TYCHO_MODE = os.environ.get("TYCHO_MODE", "null" if DEV_PHASE == "stub" else "live")
@@ -80,6 +81,7 @@
     "django.contrib.auth",
     "django.contrib.messages",
     "django.contrib.sites",
+    "django_saml2_auth",
 ]

 THIRD_PARTY_APPS = [
@@ -102,6 +104,7 @@
     "frontend",
     "middleware",
     "product",
+    "tycho",
 ]

 OAUTH_PROVIDERS = os.environ.get("OAUTH_PROVIDERS", "").split(",")
@@ -122,9 +125,10 @@
     "django.contrib.auth.middleware.AuthenticationMiddleware",
     "django.contrib.messages.middleware.MessageMiddleware",
     "django.middleware.clickjacking.XFrameOptionsMiddleware",
-    "django.contrib.auth.middleware.RemoteUserMiddleware",
+    "django.contrib.auth.middleware.PersistentRemoteUserMiddleware",
     "middleware.filter_whitelist_middleware.AllowWhiteListedUserOnly",
     "middleware.session_idle_timeout.SessionIdleTimeout",
+    "allauth.account.middleware.AccountMiddleware"
 ]

 SESSION_IDLE_TIMEOUT = int(os.environ.get("DJANGO_SESSION_IDLE_TIMEOUT", 300))
@@ -143,8 +147,8 @@
 ACCOUNT_EMAIL_REQUIRED = True
 ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
 ACCOUNT_EMAIL_VERIFICATION = "none"
-ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 3
-ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 86400  # 1 day in seconds
+ACCOUNT_RATE_LIMITS = {'login_failed': 10}
+# Deprecated: ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 86400  # 1 day in seconds
 ACCOUNT_LOGOUT_REDIRECT_URL = "/helx"
 LOGIN_REDIRECT_URL = "/helx/workspaces/login/success"
 LOGIN_URL = "/accounts/login"
@@ -152,11 +156,13 @@
 OIDC_SESSION_MANAGEMENT_ENABLE = True
 SAML_URL = "/accounts/saml"
 SAML_ACS_URL = "/saml2_auth/acs/"
+#SAML_ACS_URL = "/sso/acs/"
 SOCIALACCOUNT_QUERY_EMAIL = ACCOUNT_EMAIL_REQUIRED
 SOCIALACCOUNT_STORE_TOKENS = True
 SOCIALACCOUNT_PROVIDERS = {
     "google": {"SCOPE": ["profile", "email"], "AUTH_PARAMS": {"access_type": "offline"}}
 }
+SECURE_CROSS_ORIGIN_OPENER_POLICY = None

 TEMPLATES = [
     {
@@ -235,13 +241,15 @@
 )

 # Email configuration
+# UNC Relay: relay.unc.edu:25
+# Renci Relay: relay.renci.org
 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
-EMAIL_HOST = "relay.unc.edu"
-EMAIL_PORT = "25"
+EMAIL_HOST = os.environ.get("EMAIL_HOST", "")
+EMAIL_PORT = os.environ.get("EMAIL_PORT", "")
 EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER", "appstore@renci.org")
 EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", "")
 RECIPIENT_EMAILS = os.environ.get("RECIPIENT_EMAILS", "")
-EMAIL_USE_TLS = False
+EMAIL_USE_TLS = os.environ.get("EMAIL_USE_TLS", "").lower() == "true"  # Boolean; env vars are strings
 DEFAULT_FROM_EMAIL = os.environ.get("APPSTORE_DEFAULT_FROM_EMAIL", EMAIL_HOST_USER)
 DEFAULT_SUPPORT_EMAIL = os.environ.get(
     "APPSTORE_DEFAULT_SUPPORT_EMAIL", EMAIL_HOST_USER
@@ -337,6 +345,9 @@
     },
 }

+csrf_strings = os.environ.get("CSRF_DOMAINS", "")
+CSRF_TRUSTED_ORIGINS = [] if len(csrf_strings) == 0 else csrf_strings.split(',')
+
 # All debug settings
 if DEBUG and DEV_PHASE in ("local", "stub", "dev"):
     INSTALLED_APPS += [
@@ -347,6 +358,13 @@
         "127.0.0.1",
     ]

+    CSRF_TRUSTED_ORIGINS += [
+        "https://localhost",
+        "https://127.0.0.1",
+        "http://localhost",
+        "http://127.0.0.1",
+    ]
+
     CORS_ALLOWED_ORIGINS = [
         "https://localhost:3000",
"https://127.0.0.1:3000", @@ -357,11 +375,6 @@ # We don't want to create security vulnerabilities through CORS policy. Only allow on dev deployments where the UI may be running on another origin. CORS_ALLOW_CREDENTIALS = True - CSRF_TRUSTED_ORIGINS = [ - "localhost", - "127.0.0.1", - ] - DEBUG_MIDDLEWARE = [ "corsheaders.middleware.CorsMiddleware", "debug_toolbar.middleware.DebugToolbarMiddleware", @@ -387,10 +400,18 @@ "first_name": "givenName", "last_name": "sn", }, + "TRIGGER": { + "CREATE_USER": "core.models.update_user", + }, "ASSERTION_URL": os.environ.get("SAML2_AUTH_ASSERTION_URL"), "ENTITY_ID": os.environ.get( "SAML2_AUTH_ENTITY_ID" ), # Populates the Issuer element in authn request + "USE_JWT": False, + 'WANT_ASSERTIONS_SIGNED': True, + 'AUTHN_REQUESTS_SIGNED': False, + 'WANT_RESPONSE_SIGNED': False, + 'TOKEN_REQUIRED': False, } # Metadata is required, either remote url or local file path, check the environment @@ -405,3 +426,5 @@ SAML2_AUTH["METADATA_AUTO_CONF_URL"] = metadata_source else: SAML2_AUTH["METADATA_LOCAL_FILE_PATH"] = metadata_source else: SAML2_AUTH["METADATA_LOCAL_FILE_PATH"] = metadata_source + +DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/appstore/appstore/settings/heal_settings.py b/appstore/appstore/settings/heal_settings.py index 16875adfc..861b70816 100644 --- a/appstore/appstore/settings/heal_settings.py +++ b/appstore/appstore/settings/heal_settings.py @@ -8,5 +8,5 @@ title="NIH HEAL Initiative", logo_url="/static/images/heal/logo.png", color_scheme=ProductColorScheme("#8a5a91", "#505057"), - links=None, + links=[], ) diff --git a/appstore/appstore/settings/helx_settings.py b/appstore/appstore/settings/helx_settings.py index 9dbc68308..80b4bbf31 100644 --- a/appstore/appstore/settings/helx_settings.py +++ b/appstore/appstore/settings/helx_settings.py @@ -8,5 +8,5 @@ title="HeLx", logo_url="/static/images/helx/logo.png", color_scheme=ProductColorScheme("#8a5a91", "#505057"), - links=None, + links=[], ) diff --git a/appstore/core/admin.py b/appstore/core/admin.py index c99a11d56..74b6104d7 100644 --- a/appstore/core/admin.py +++ b/appstore/core/admin.py @@ -4,8 +4,8 @@ class AuthorizedUserAdmin(admin.ModelAdmin): - fields = ['email'] - list_display = 'email' + fields = ['email', 'username'] + list_display = ['email', 'username'] admin.site.register(AuthorizedUser) diff --git a/appstore/core/migrations/0001_initial.py b/appstore/core/migrations/0001_initial.py index 79af42caf..2553af8da 100644 --- a/appstore/core/migrations/0001_initial.py +++ b/appstore/core/migrations/0001_initial.py @@ -15,7 +15,8 @@ class Migration(migrations.Migration): name='AuthorizedUsers', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('email', models.EmailField(max_length=254)), + ('email', models.EmailField(max_length=254, verbose_name='Email')), + ('username', models.CharField(max_length=128, verbose_name='Username')), ], ), ] diff --git a/appstore/core/models.py b/appstore/core/models.py index 1135ef126..df42b770f 100644 --- a/appstore/core/models.py +++ b/appstore/core/models.py @@ -1,15 +1,33 @@ from django.db import models +from django.contrib.auth import get_user_model +from django.core.exceptions import ValidationError +from django_saml2_auth.user import get_user +def update_user(user): + # as of Django_saml2_auth v3.12.0 does not add email address by default + # to the created use entry in django db according to: + # 
+    # https://github.com/grafana/django-saml2-auth/blob/11b97beaa2a431209e2c54103cb49c033c42ff54/django_saml2_auth/user.py#L93
+    # https://github.com/grafana/django-saml2-auth/blob/11b97beaa2a431209e2c54103cb49c033c42ff54/django_saml2_auth/user.py#L165
+    # This trigger gets and sets the email field in the Django user db.
+    _user = get_user(user)
+    _user.email = user['email']
+    _user.save()
+    return _user

 class AuthorizedUser(models.Model):
-    email = models.EmailField(max_length=254)
+    email = models.EmailField(max_length=254, blank=True)
+    username = models.CharField(max_length=128, blank=True)

     def __str__(self):
-        return self.email
+        return f"{self.email}, {self.username}"
+
+    def clean(self):
+        if not self.email and not self.username:
+            raise ValidationError("Please enter a value for either email and/or username. Both cannot be empty.")

 class IrodAuthorizedUser(models.Model):
     user = models.TextField(max_length=254)
     uid = models.IntegerField()

     def __str__(self):
-        return (self.user,self.uid)
\ No newline at end of file
+        return f"{self.user}, {self.uid}"
\ No newline at end of file
diff --git a/appstore/core/static/images/radx/logo.png b/appstore/core/static/images/radx/logo.png
new file mode 100644
index 000000000..6ade1b313
Binary files /dev/null and b/appstore/core/static/images/radx/logo.png differ
diff --git a/appstore/middleware/filter_whitelist_middleware.py b/appstore/middleware/filter_whitelist_middleware.py
index ef2affd16..57fda6623 100644
--- a/appstore/middleware/filter_whitelist_middleware.py
+++ b/appstore/middleware/filter_whitelist_middleware.py
@@ -18,9 +18,16 @@

 class AllowWhiteListedUserOnly(MiddlewareMixin):
+    def __init__(self, get_response=None):
+        if get_response is not None:
+            self.get_response = get_response
+        else:
+            self.get_response = self._get_response
+
     def process_request(self, request):
         user = request.user
         logger.debug(f"testing user: {user}")
+
         if user.is_authenticated and not user.is_superuser:
             if not any(
                 [
@@ -34,7 +41,6 @@ def process_request(self, request):
                     request.path.startswith("/api/v1/providers"),
                 ]
             ):
-
                 if self.is_authorized(user):
                     logger.debug(f"Adding user {user} to whitelist")
                     whitelist_group = Group.objects.get(name="whitelisted")
@@ -56,6 +62,17 @@ def process_request(self, request):
         logger.info(f"accepting user {user}")
         return None

+    def _get_response(self, request):
+        """
+        Call the next middleware in the chain to get a response.
+        """
+        # Call the next middleware in the chain to get a response
+        if hasattr(self, 'process_response'):
+            return self.process_response(request)
+        else:
+            # If there's no process_response method, return None
+            return None
+
     @staticmethod
     def is_whitelisted(user):
         if user.groups.filter(name="whitelisted").exists():
@@ -69,6 +86,11 @@ def is_auto_whitelisted_email(user):
             if re.match(pattern, email) is not None:
                 return True
         return False
+
+    @staticmethod
+    def is_whitelisted_username(user):
+        username = user.username

     @staticmethod
     def is_authorized(user):
@@ -79,6 +101,9 @@ def is_authorized(user):
             # authorize the user automatically, and allow them through.
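             # Persisting the auto-whitelisted email in AuthorizedUser means later
             # requests are authorized by the database lookup directly.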
             AuthorizedUser.objects.create(email=user.email)
             return True
+        if AuthorizedUser.objects.filter(username=user.username).exists():
+            logger.debug(f"found user with username {user.username} in AuthorizedUser")
+            return True
         logger.debug(f"user email {user.email} not found in AuthorizedUser")
         return False

@@ -98,6 +123,8 @@ def send_whitelist_email(request, user):
     msg = (
         "A user "
         + user.email
+        + "/"
+        + user.username
         + " is requesting access to AppStore on "
         + settings.APPLICATION_BRAND
         + " and needs to be reviewed for whitelisting. Upon successful review, kindly add the user to"
diff --git a/appstore/middleware/tests.py b/appstore/middleware/tests.py
index 873d47c54..eb5f39cce 100644
--- a/appstore/middleware/tests.py
+++ b/appstore/middleware/tests.py
@@ -48,6 +48,7 @@ def _create_user_and_login(
         return user

     def test_login_whitelisted_user(self):
+        print("---- TESTING FOR WHITELISTED USER (steve_whitelist) ----- ")
         user = self._create_user_and_login(
             username="Steve_whitelist", email="steve@renci.com", password="admin"
         )
@@ -55,7 +56,22 @@ def test_login_whitelisted_user(self):
         self.request.user = user
         self.request.session = self.client.session
         response = self.middleware.process_request(self.request)
-        self.assertTrue(isinstance(response, HttpResponseRedirect))
+        self.assertFalse(isinstance(response, HttpResponseRedirect))
         self.assertEqual(
             list(self.request.user.groups.values_list("name", flat=True))[0],
             self.groups.name,
         )
+
+    def test_login_whitelisted_username(self):
+        print("---- TESTING FOR WHITELISTED USERNAME (steve_whitelist) ----- ")
+        user = self._create_user_and_login(
+            username="Steve_whitelist", email="steve@renci.com", password="admin"
+        )
+        AuthorizedUser.objects.create(username=user.username)
+        self.request.user = user
+        self.request.session = self.client.session
+        response = self.middleware.process_request(self.request)
+        self.assertFalse(isinstance(response, HttpResponseRedirect))
+        self.assertEqual(
+            list(self.request.user.groups.values_list("name", flat=True))[0],
+            self.groups.name,
+        )
diff --git a/appstore/tycho/__init__.py b/appstore/tycho/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/appstore/tycho/actions.py b/appstore/tycho/actions.py
new file mode 100644
index 000000000..61e85048d
--- /dev/null
+++ b/appstore/tycho/actions.py
@@ -0,0 +1,151 @@
+import argparse
+import ipaddress
+import json
+import jsonschema
+import logging
+import netifaces
+import os
+import requests
+import sys
+import traceback
+import yaml
+from tycho.core import Tycho
+from tycho.tycho_utils import NetworkUtils
+
+"""
+Provides actions for creating, monitoring, and deleting distributed systems of cloud native
+containers running on abstracted compute fabrics.
+"""
+logger = logging.getLogger(__name__)
+
+""" Load the schema. """
+schema_file_path = os.path.join(
+    os.path.dirname(__file__),
+    'api-schema.yaml')
+template = None
+with open(schema_file_path, 'r') as file_obj:
+    template = yaml.load(file_obj, Loader=yaml.FullLoader)  #nosec B506
+
+backplane = None
+_tycho = Tycho(backplane=backplane)
+
+
+def tycho():
+    return _tycho
+
+
+class TychoResource:
+    """ Base class handler for Tycho requests. """
+    def __init__(self):
+        self.specs = {}
+
+    """ Functionality common to Tycho services. """
+    def validate(self, request, component):
+        """ Validate a request against the schema.
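+
+        Validation is driven by the components/schemas section of api-schema.yaml;
+        a failing check is logged rather than raised.
+
+        :param request: Request object to validate.
+        :param component: Schema component name to validate against, e.g. "System".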
""" + if not self.specs: + with open(schema_file_path, 'r') as file_obj: + self.specs = yaml.load(file_obj, Loader=yaml.FullLoader) #nosec B506 + to_validate = self.specs["components"]["schemas"][component] + try: + logger.debug(f"--:Validating obj {request}") + logger.debug(f" schema: {json.dumps(to_validate, indent=2)}") + jsonschema.validate(request, to_validate) + except jsonschema.exceptions.ValidationError as error: + logger.error(f"ERROR: {str(error)}") + traceback.print_exc() + + def create_response(self, result=None, status='success', message='', exception=None): + """ Create a response. Handle formatting and modifiation of status for exceptions. """ + if exception: + traceback.print_exc() + status = 'error' + exc_type, exc_value, exc_traceback = sys.exc_info() + message = f"{exception.args[0]} {''.join (exception.args[1])}" \ + if len(exception.args) == 2 else exception.args[0] + result = { + 'error': message + } + return { + 'status': status, + 'result': result, + 'message': message + } + + +class StartSystemResource(TychoResource): + """ Parse, model, emit orchestrator artifacts and execute a system. """ + + """ System initiation. """ + def post(self, request): + response = {} + try: + logger.info(f"actions.StartSystemResource.post - start-system: {json.dumps(request, indent=2)}") + self.validate(request, component="System") + system = tycho().parse(request) + response = self.create_response( + result=tycho().get_compute().start(system), + message=f"Started system {system.name}") + except Exception as e: + response = self.create_response( + exception=e, + message=f"Failed to create system.") + return response + + +class DeleteSystemResource(TychoResource): + """ System termination. Given a GUID for a Tycho system, use Tycho core to eliminate all + components comprising the running system.""" + def post(self, request): + response = {} + system_name = None + try: + logger.debug(f"delete-request: {json.dumps(request, indent=2)}") + self.validate(request, component="DeleteRequest") + system_name = request['name'] + response = self.create_response( + result=tycho().get_compute().delete(system_name), + message=f"Deleted system {system_name}") + except Exception as e: + response = self.create_response( + exception=e, + message=f"Failed to delete system {system_name}.") + return response + + +class StatusSystemResource(TychoResource): + """ Status executing systems. Given a GUID (or not) determine system status. 
""" + + def post(self, request): + response = {} + try: + logging.debug(f"list-request: {request}") + self.validate(request, component="StatusRequest") + system_name = request.get('name', None) + system_username = request.get('username', None) + response = self.create_response( + result=tycho().get_compute().status(system_name, system_username), + message=f"Get status for system {system_name}") + except Exception as e: + response = self.create_response( + exception=e, + message=f"Failed to get system status.") + print(json.dumps(response, indent=2)) + return response + + +class ModifySystemResource(TychoResource): + """ Modify a system given a name, labels, resources(cpu and memory) """ + + def post(self, request): + try: + logging.debug(f"System specs to modify: {request}") + system_modify = tycho().parse_modify(request) + response = self.create_response( + result=tycho().get_compute().modify(system_modify), + message=f"Modified the system") + except Exception as e: + response = self.create_response( + exception=e, + message=f"Failed to modify system status.") + return response \ No newline at end of file diff --git a/appstore/tycho/api-schema.yaml b/appstore/tycho/api-schema.yaml new file mode 100644 index 000000000..0f4d5b9e5 --- /dev/null +++ b/appstore/tycho/api-schema.yaml @@ -0,0 +1,144 @@ +openapi: 3.0.1 +info: + description: Exploratory bioinformatic datascience via software defined distributed systems. + version: 0.0.1 + title: + contact: + email: scox@renci.org + license: + name: Apache 2.0 + url: 'http://www.apache.org/licenses/LICENSE-2.0.html' +externalDocs: + description: Exploratory bioinformatic datascience via software defined distributed systems. + url: 'https://github.com/heliumplusdatastage/tycho' +tags: + - name: message + description: Request compute services. + externalDocs: + description: Documentation for the compute request. + url: 'https://github.com/heliumplusdatastage/tycho#request' +paths: + /system/start: + post: + summary: Compute service request. + description: '' + operationId: start + requestBody: + description: Compute service request. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/System' + responses: + '200': + description: successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Response' + '400': + description: Invalid status value + x-swagger-router-controller: swagger_server.controllers.query_controller + +components: + schemas: + Limits: + type: object + properties: + cpus: + type: string + example: "0.3" + description: Number of CPUs requested. May be a fractional value. + memory: + type: string + example: "512M" + description: Amount of memory to request for this container. + Port: + type: object + properties: + containerPort: + type: integer + example: 80 + description: Container port to expose + EnvironmentVariable: + type: object + properties: + name: + type: string + example: X + description: Name of an environment variable + value: + type: string + example: http://example.org + description: A string value. + Container: + type: object + properties: + name: + type: string + example: web-server + description: Name of the container to execute. + image: + type: string + example: nginx:1.9.1 + description: Name and version of a docker image to execute. 
+        limits:
+          type: array
+          items:
+            $ref: '#/components/schemas/Limits'
+          example:
+            - cpus: "0.3"
+              memory: "512M"
+        command:
+          type: array
+          required: false
+          items:
+            type: string
+        env:
+          type: array
+          items:
+            $ref: '#/components/schemas/EnvironmentVariable'
+        ports:
+          type: array
+          items:
+            $ref: '#/components/schemas/Port'
+    System:
+      type: object
+      properties:
+        name:
+          type: string
+          example: some-stack
+          description: Description of the system provided and defined by this set of components.
+        containers:
+          type: array
+          items:
+            $ref: '#/components/schemas/Container'
+    Response:
+      type: object
+      properties:
+        status:
+          type: string
+          example: success | error
+          description: Status code denoting the outcome of the activity.
+        message:
+          type: string
+          example: Job succeeded.
+          description: Description of the result.
+        result:
+          type: object
+    DeleteRequest:
+      type: object
+      properties:
+        name:
+          type: string
+          example: test-app
+          description: Identifier of system to delete
+    StatusRequest:
+      type: object
+      properties:
+        name:
+          type: string
+          example: test-app
+          nullable: true
+          description: Identifier of system to list
\ No newline at end of file
diff --git a/appstore/tycho/apps.py b/appstore/tycho/apps.py
new file mode 100644
index 000000000..b275a4d30
--- /dev/null
+++ b/appstore/tycho/apps.py
@@ -0,0 +1,6 @@
+from django.apps import AppConfig
+
+
+class TychoConfig(AppConfig):
+    name = 'tycho'
+
diff --git a/appstore/tycho/client.py b/appstore/tycho/client.py
new file mode 100644
index 000000000..85ce49eb1
--- /dev/null
+++ b/appstore/tycho/client.py
@@ -0,0 +1,595 @@
+import requests
+import ipaddress
+import json
+import logging
+import os
+import git
+import shutil
+import sys
+import traceback
+import argparse
+import yaml
+from tycho.tycho_utils import TemplateUtils
+from tycho.config import Config
+from tycho.exceptions import TychoException
+from tycho.actions import StartSystemResource, StatusSystemResource, DeleteSystemResource, ModifySystemResource
+from kubernetes import client as k8s_client, config as k8s_config
+
+logger = logging.getLogger(__name__)
+
+mem_converter = {
+    'M': lambda v: v * 10 ** 6,
+    'G': lambda v: v * 10 ** 9,
+    'T': lambda v: v * 10 ** 12,
+    'P': lambda v: v * 10 ** 15,
+    'E': lambda v: v * 10 ** 18
+}
+
+
+class TychoService:
+    """ Represent a service endpoint. """
+    try_minikube = True
+
+    def __init__(self, name, app_id, ip_address, port, sid=None, creation_time=None, username="", utilization={}, conn_string="", workspace_name="", is_ready=False):
+        self.name = name
+        self.app_id = app_id
+        self.ip_address = ip_address
+        self.port = port
+        self.identifier = sid
+        self.creation_time = creation_time
+        self.username = username
+        self.utilization = utilization
+        self.total_util = self.get_utilization(utilization)
+        self.conn_string = conn_string
+        self.workspace_name = workspace_name
+        self.is_ready = is_ready
+
+    def get_utilization(self, utilization):
+        total = {
+            "gpu": 0,
+            "cpu": 0,
+            "memory": 0,
+            "ephemeralStorage": 0
+        }
+        for key, val in utilization.items():
+            if 'cpu' in val.keys():
+                if 'm' in val['cpu']:
+                    total['cpu'] = total['cpu'] + int(val['cpu'].replace('m', ''))
+                else:
+                    total['cpu'] = total['cpu'] + int(val['cpu']) * 1000
+            # Will have to adjust this if we want to check for non-nvidia GPUs.
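+            # GPU counts are read from any resource key containing "nvidia" and,
+            # like CPU, normalized to milli-units (count * 1000); memory strings
+            # such as "512Mi" are converted to bytes via mem_converter and the
+            # total is reported in GB below.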
+            for key in val.keys():
+                if 'nvidia' in key:
+                    total['gpu'] = total['gpu'] + int(val[key]) * 1000
+            mem = val['memory'].replace("i", "")
+            """ Run the conversion function designated by the last character of the value on the integer value """
+            mem_val = mem_converter[mem[-1]](int(mem[:-1]))
+            total['memory'] = total['memory'] + mem_val
+        total['memory'] = f"{total['memory'] / (10 ** 9)}"
+        return total
+
+    def __repr__(self):
+        b = f"id: {self.identifier} time: {self.creation_time} util: {self.utilization}"
+        return f"name: {self.name} ip: {self.ip_address} port: {self.port} user:{self.username} {b}"
+
+
+class TychoStatus:
+    """ A response from a status request. """
+    def __init__(self, status, result, message):
+        self.status = status
+        self.services = list(map(lambda v: TychoService(**v), result))
+        self.message = message
+
+    def __repr__(self):
+        return f"status: {self.status} svcs: {[str(s) for s in self.services]} msg: {self.message}"
+
+
+class TychoSystem:
+    """ Represents a running system. """
+    def __init__(self, status, result, message):
+        self.status = status
+        if status == 'error':
+            raise TychoException(f"status:{status} result:{result} message:{message}")
+        self.name = result['name']
+        self.identifier = result['sid']
+        self.services = [
+            TychoService(name=k, app_id=result['name'], ip_address=v['ip_address'], port=v['port-1'])
+            for k, v in result['containers'].items()
+        ]
+        self.conn_string = result['conn_string']
+        self.message = message
+
+
+class TychoClient:
+    """ Python client to Tycho dynamic application deployment API. """
+
+    def __init__(self, url="http://localhost:5000"):
+        """ Construct a client.
+
+        :param url: URL of the Tycho API endpoint.
+        :type url: string
+        """
+        self.url = f"{url}/system"
+        self.actions = {
+            'start': StartSystemResource(),
+            'status': StatusSystemResource(),
+            'delete': DeleteSystemResource(),
+            'modify': ModifySystemResource()
+        }
+
+    def request(self, service, request):
+        """ Send a request to the server. Generic underlayer to all requests.
+
+        :param service: URL path to the service to invoke.
+        :param request: JSON to send to the API endpoint.
+        """
+        if os.environ.get("REST_API", "false") == "true":
+            response = requests.post(f"{self.url}/{service}", json=request)  #nosec B113
+            result_text = f"HTTP status {response.status_code} received from service: {service}"
+            logger.debug(f"TychoClient.request - {result_text}")
+            if not response.status_code == 200:
+                raise Exception(f"Error: {result_text}")
+            result = response.json()
+            logger.debug(f"TychoClient.request - {json.dumps(result, indent=2)}")
+        else:
+            result = self.actions.get(service).post(request)
+            logger.debug(f"TychoClient.request - {result} received from service: {service}")
+        return result
+
+    def format_name(self, name):
+        """ Format a service name to be a valid DNS label.
+
+        :param name: Format a name.
+        """
+        return name.replace(os.sep, '-')
+
+    def parse_env(self, environment):
+        return {
+            line.split("=", maxsplit=1)[0]: line.split("=", maxsplit=1)[1]
+            for line in environment.split("\n") if '=' in line
+        }
+
+    def start(self, request):
+        """ Start a service.
+
+        The general format of a start request is::
+
+            {
+                "name" : <name>,
+                "env" : <env>,
+                "system" : <system>
+            }
+
+        :param request: A request object formatted as above.
+        :type request: JSON
+        :returns: Returns a TychoSystem object
+        """
+        response = self.request("start", request)
+        error = response.get('result', {}).get('error', None)
+        if error == 'error':
+            for e in error:
+                logger.error(e)
+        return TychoSystem(**response)
+
+    def delete(self, request):
+        """ Delete a service.
+
+        Given the GUID of a running service, delete it and all its constituent parts.
+
+        The general format of a delete request is::
+
+            {
+                "name" : <guid>
+            }
+
+        :param request: A request formatted as above.
+        :type request: JSON
+        """
+        logger.error(f"-- delete: {json.dumps(request, indent=2)}")
+        print(f"-- delete: {json.dumps(request, indent=2)}")
+        return self.request("delete", request)
+
+    def status(self, request):
+        """ Get status of running systems.
+
+        Get the status of a system by GUID or across systems.
+
+        The format of a request is::
+
+            {}
+
+        :param request: Request formatted as above.
+        :type request: JSON
+        """
+        response = self.request("status", request)
+        return TychoStatus(**response)
+
+    def modify(self, request):
+        """ Takes in JSON formatted metadata and specs of a running system.
+
+        Some examples for a request:
+
+        * {"tycho-guid": <guid>, "labels": {"name": <name>}, "resources": {"cpu": <cpu>, "memory": <memory>}}
+        * {"tycho-guid": <guid>, "labels": {"name": "<name>"}}
+        * {"tycho-guid": <guid>, "resources": {"cpu": <cpu>, "memory": <memory>}}
+
+        :param request: Request formatted as above
+        :type request: json
+
+        :returns: A list of all the patches applied to the system
+        :rtype: A list
+        """
+        response = self.request("modify", request)
+        return response
+
+    def up(self, name, system, settings=""):
+        """ Bring a service up starting with a docker-compose spec.
+
+        CLI endpoint to start a service on the Tycho compute fabric::
+
+            tycho up -f path/to/docker-compose.yaml
+
+        :param name: Name of the system.
+        :type name: str
+        :param system: Docker-compose JSON structure.
+        :type system: JSON
+        :param settings: The textual contents of a .env file.
+        :type settings: str
+        """
+        services = {}
+        for container_name, container in system['services'].items():
+            if 'ports' in container.keys():
+                ports = container['ports']
+                for port in ports:
+                    port_num = int(port.split(':')[1] if ':' in port else port)
+                    services[container_name] = {
+                        "port": port_num
+                        #"clients" : [ "192.16.1.179" ]
+                    }
+
+        request = {
+            "name": self.format_name(name),
+            "principal": '{"username": "renci"}',
+            "serviceaccount": "default",
+            "env": self.parse_env(settings),
+            "system": system,
+            "services": services
+        }
+        logger.debug(f"client.up - request: {json.dumps(request, indent=2)}")
+        response = self.start(request)
+        logger.debug(f"client.up - response: {response}")
+        if response.status == 'error':
+            print(response.message)
+        else:
+            format_string = '{:<30} {:<35} {:<15} {:<7}'
+            print(format_string.format("SERVICE", "GUID", "IP_ADDRESS", "PORT"))
+            for service in response.services:
+                print(format_string.format(
+                    TemplateUtils.trunc(service.name, max_len=28),
+                    TemplateUtils.trunc(response.identifier, max_len=33),
+                    service.ip_address,
+                    service.port))
+                break
+
+    def list(self, name, terse=False):
+        """ List status of executing systems.
+
+        CLI endpoint to list status of services::
+
+            tycho status
+            tycho status -terse
+
+        :param name: GUID of a service to get status for.
+        :type name: str
+        :param terse: Print just the GUID for running systems
+        :type terse: boolean
+        """
+        try:
+            request = {"name": self.format_name(name)} if name else {}
+            request['username'] = 'renci'
+            response = self.status(request)
+            logger.debug(f"client.list - response: {response}")
+            if response.status == 'success':
+                if terse:
+                    for service in response.services:
+                        print(service.identifier)
+                elif len(response.services) == 0:
+                    print('None running')
+                else:
+                    format_string = '{:<30} {:<35} {:<15} {:<7} {:<1}'
+                    print(format_string.format("SYSTEM", "GUID", "IP_ADDRESS", "PORT", "CREATION_TIME"))
+                    for service in response.services:
+                        print(format_string.format(
+                            TemplateUtils.trunc(service.name, max_len=28),
+                            TemplateUtils.trunc(service.identifier, max_len=33),
+                            service.ip_address,
+                            service.port,
+                            service.creation_time))
+            elif response.status == 'error':
+                print(response)
+        except Exception as e:
+            raise e
+
+    def down(self, names):
+        """ Bring down a service.
+
+        CLI endpoint for deleting running systems::
+
+            tycho down <guid>
+
+        :param names: GUIDs of systems to delete.
+        :type names: str
+        """
+        try:
+            for name in names:
+                response = self.delete({"name": self.format_name(name)})
+                logger.debug(f"TychoClient.down - {json.dumps(response, indent=2)}")
+                if response.get('status', None) == 'success':
+                    print(f"{name}")
+                else:
+                    print(json.dumps(response, indent=2))
+        except Exception as e:
+            traceback.print_exc()
+
+    def patch(self, mod_items):
+        """ Modify a running system.
+
+        Takes in JSON formatted metadata and specs of a running system.
+
+        Some examples for a request:
+
+        * {"tycho-guid": <guid>, "labels": {"name": <name>}, "resources": {"cpu": <cpu>, "memory": <memory>}}
+        * {"tycho-guid": <guid>, "labels": {"name": "<name>"}}
+        * {"tycho-guid": <guid>, "resources": {"cpu": <cpu>, "memory": <memory>}}
+
+        CLI endpoint for modifying running systems::
+
+            python client -m <payload>
+
+        :param mod_items: Request formatted as above
+        :type mod_items: json
+
+        :returns: A list of all the patches applied to the system
+        :rtype: A list
+        """
+        logger.info(f"System specifications and metadata to be modified: {mod_items}")
+        try:
+            response = self.modify(mod_items)
+            logger.debug(f"TychoClient.patch - {json.dumps(response, indent=2)}")
+            return response
+        except (AttributeError, Exception) as e:
+            logger.exception(f"Error in modifying system. {e}")
+
+
+class TychoClientFactory:
+    """ Locate a Tycho API instance in a Kubernetes cluster.
+
+    This is written to work whether run in-cluster or standalone. If we're running outside of
+    the cluster, we use the environment's kubernetes configuration. If we're running
+    inside kubernetes, we use the "in cluster" configuration to locate the configuration.
+    """
+    def __init__(self):
+        """ Initialize connection to Kubernetes.
+
+        Load the kubernetes configuration in an environment appropriate way as described above.
+
+        Then create the K8S API endpoint.
+        """
+        if os.getenv('KUBERNETES_SERVICE_HOST'):
+            logger.debug("--loading in cluster configuration.")
+            k8s_config.load_incluster_config()
+        else:
+            logger.debug("--loading kube config, cluster external.")
+            k8s_config.load_kube_config()
+        api_client = k8s_client.ApiClient()
+        self.api = k8s_client.CoreV1Api(api_client)
+
+    def get_client(self, name="tycho-api", namespace="default", default_url="http://localhost:5000"):
+        """ Locate the client endpoint using the K8s API.
+
+        Locate the Tycho API using the K8S API. We do this by reading services in the
+        given namespace with the given name.
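+        (When running in-cluster, the namespace argument is first overridden by
+        the service account's namespace file, when readable.)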
+        Then we look for a load balancer IP and port
+        to build a URL. This works for public cloud clusters. With some modification, it
+        could work for Minikube but that is a future effort.
+
+        :param name: Name of the Tycho API service in Kubernetes.
+        :type name: str
+        :param namespace: The namespace the service is deployed to.
+        :type namespace: str
+        """
+        try:
+            with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as secrets:
+                for line in secrets:
+                    namespace = line
+                raise Exception
+        except Exception as e:
+            logger.warning(f"cannot get namespace from file: {e}")
+
+        url = None
+        client = None
+        try:
+            service = self.api.read_namespaced_service(
+                name=name,
+                namespace=namespace)
+            if not service:
+                url = default_url
+            elif service.status and service.status.load_balancer:
+                ip_address = "tycho-api"
+                port = service.spec.ports[0].port
+                logger.debug(f"located tycho api instance in kube")
+                url = f"http://{ip_address}:{port}"
+            elif service.spec and len(service.spec.ports) > 0:
+                logger.debug("--looking in minikube for a node port based service.")
+                ip = os.popen('minikube ip').read().strip()
+                if len(ip) > 0:
+                    try:
+                        ipaddress.ip_address(ip)
+                        logger.info(f"configuring minikube ip: {ip}")
+                        port = service.spec.ports[0].node_port
+                        logger.debug(f"located tycho api instance in minikube")
+                        url = f"http://{ip}:{port}"
+                    except ValueError as e:
+                        logger.error("unable to get minikube ip address")
+                        traceback.print_exc()
+            print(f"URL: {url}")
+        except Exception as e:
+            url = default_url
+            print(f"url: {url}")
+            logger.info(f"cannot find {name} in namespace {namespace}")
+
+        logger.info(f"creating tycho client with url: {url}")
+        return TychoClient(url=url)
+
+
+class TychoApps:
+    def __init__(self, app):
+        self.app = app
+        self.repo_url = 'https://github.com/heliumplusdatastage/app-support-prototype.git'
+        self.repo_name = 'app-support-prototype'
+        self.apps_dir = 'dockstore-yaml-proposals'
+        self.source_file = 'docker-compose.yaml'
+        self.env = '.env'
+        self.metadata = {}
+
+    def deleterepo(self, repo_path):
+        shutil.rmtree(repo_path)
+
+    def getmetadata(self):
+        base_dir = os.path.dirname(os.path.abspath(__file__))
+        try:
+            git.Git(base_dir).clone(self.repo_url)
+        except Exception as e:
+            traceback.print_exc()
+        repo_path = os.path.join(base_dir, self.repo_name)
+        apps_path = os.path.join(repo_path, self.apps_dir)
+        for _, dnames, _ in os.walk(apps_path):
+            if self.app in dnames:
+                system_path = os.path.join(apps_path, self.app, self.source_file)
+                env_path = os.path.join(apps_path, self.app, self.env)
+                with open(system_path, "r") as stream:
+                    system = yaml.safe_load(stream)
+                self.metadata['System'] = system
+                if os.path.exists(env_path):
+                    with open(env_path, 'r') as stream:
+                        settings = stream.read()
+                    self.metadata['Settings'] = settings
+                break
+        self.deleterepo(repo_path)
+        return self.metadata
+
+
+if __name__ == "__main__":
+    """ A CLI for Tycho. """
+    status_command = "@status_command"
+    parser = argparse.ArgumentParser(description='Tycho Client')
+    parser.add_argument('-u', '--up', help="Launch service.", action='store_true')
+    parser.add_argument('-s', '--status', help="Get status of running systems.", nargs='?', const=status_command, default=None)
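+    # "-s" with no value falls back to the @status_command sentinel, which lists all systems.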
+    parser.add_argument('-d', '--down', help="Delete a running system. Requires a system id.", nargs='*')
+    parser.add_argument('-p', '--port', type=int, help="Port to expose.")
+    parser.add_argument('-c', '--container', help="Container to run.")
+    parser.add_argument('-n', '--name', help="Service name.")
+    parser.add_argument('--service', help="Tycho API URL.", default="http://localhost:5000")
+    parser.add_argument('--env', help="Env variable", default=None)
+    parser.add_argument('--command', help="Container command", default=None)
+    parser.add_argument('--settings', help="Environment settings", default=None)
+    parser.add_argument('-f', '--file', help="A docker compose (subset) formatted system spec.")
+    parser.add_argument('-a', '--app', help="Name of the app. Should be one of the apps available in app-support-prototype repo")
+    parser.add_argument('-t', '--trace', help="Trace (debug) logging", action='store_true', default=False)
+    parser.add_argument('--terse', help="Keep status short", action='store_true', default=False)
+    parser.add_argument('-v', '--volumes', help="Mounts a volume", default=None)
+    parser.add_argument('-m', '--modify', help="Modify a running system", default=None)
+    args = parser.parse_args()
+
+    """ Honor debug and trace settings. """
+    if args.trace:
+        logging.basicConfig(level=logging.DEBUG)
+
+    """ Resolve environment settings file as text. """
+    settings = ""
+    if args.settings:
+        with open(args.settings, "r") as stream:
+            settings = stream.read()
+
+    name = args.name
+    system = None
+    if args.app:
+        """ Name for the app. """
+        name = args.app
+
+        """ Apply settings. """
+        tychoapps = TychoApps(args.app)
+        metadata = tychoapps.getmetadata()
+
+        if 'System' in metadata.keys():
+            system = metadata['System']
+        if 'Settings' in metadata.keys():
+            settings = metadata['Settings']
+        print(f"settings: {settings}")
+
+    elif args.file:
+        if not args.name:
+            """ We've been given a docker-compose.yaml. Come up with a name for the app
+            based on the containing directory if none has been otherwise supplied. """
+            if os.sep in args.file:
+                args.file = os.path.abspath(args.file)
+                name = args.file.split('.')[0] if '.' in args.file else args.file
+                name = name.split(os.sep)[-2]
+            else:
+                name = os.path.basename(os.getcwd())
+
+        """ Apply settings. """
+        env_file = os.path.join(os.path.dirname(args.file), ".env")
+        if os.path.exists(env_file):
+            with open(env_file, 'r') as stream:
+                settings = stream.read()
+        # added safeloader here, per bandit instructions.
+        with open(args.file, "r") as stream:
+            system = yaml.load(stream.read(), Loader=yaml.SafeLoader)
+    else:
+        """ Generate a docker-compose spec based on the CLI args. """
+        name = args.name
+        template_utils = TemplateUtils(config=Config())
+        template = """
+  version: "3"
+  services:
+    {{args.name}}:
+      image: {{args.container}}
+      {% if args.command %}
+      entrypoint: {{args.command}}
+      {% endif %}
+      {% if args.port %}
+      ports:
+        - "{{args.port}}"
+      {% endif %}
+      {% if args.volumes %}
+      volumes:
+        - "{{args.volumes}}"
+      {% endif %}"""
+
+        system = template_utils.render_text(
+            TemplateUtils.apply_environment(settings, template),
+            context={"args": args})
+
+    client = None
+    """ Locate the Tycho API endpoint. Instantiate a client to use the endpoint. """
+    if args.service == parser.get_default("service"):
+        """ If the endpoint is the default value, try to discover the endpoint in kube. """
+        client_factory = TychoClientFactory()
+        client = client_factory.get_client()
+        if not client:
+            """ That didn't work so use the default value.
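+            (i.e., fall back to the --service argument, http://localhost:5000 by default)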
""" + client = TychoClient (url=args.service) + if not client: + logger.info (f"creating client directly {args.service}") + client = TychoClient (url=args.service) + + if args.up: + client.up (name=name, system=system, settings=settings) + elif args.down: + client.down (names=args.down) + elif args.status: + if args.status == status_command: # non arg + client.list (name=None, terse=args.terse) + else: + client.list (name=args.status, terse=args.terse) + elif args.modify: + mod_items = json.loads(args.modify) + client.patch(mod_items=mod_items) diff --git a/appstore/tycho/compute.py b/appstore/tycho/compute.py new file mode 100644 index 000000000..d73173010 --- /dev/null +++ b/appstore/tycho/compute.py @@ -0,0 +1,102 @@ +import argparse +import json +import logging +import os +import sys +import threading +import traceback +import yaml + +logger = logging.getLogger (__name__) + +class Compute: + """ Abstraction of a compute cluster. + + We start with three primitives: start, status, and delete. Start takes a JSON object + including (a) a docker-compose formatted description of a cloud native distributed system (b) the contents of an .env file accompanying a docker-compose.yaml including enviroment specific settings, and (c) additional metadata. + + """ + + def start (self, system, namespace="default"): + """ Given a system definition, start a distributed system. + + In docker port mapping pairs of the form :, we ignore + the host port. Tycho is designed to start many instances of an application and dynamic + port allocation is assumed. + + Volume mounts of the form : will make a platform specific + mapping of host_path. The general request format is:: + + { + "name" : , + "env" : , + "system" : + } + + Responses contain status, message, and result elements.:: + + { + "status": "success", + "result": { + "name": "nginx-7703f9cbf8f34caf8bc64e84384b7f1f", + "sid": "7703f9cbf8f34caf8bc64e84384b7f1f", + "containers": { + "nginx": { + "port": 30306 + } + } + }, + "message": "Started system nginx-7703f9cbf8f34caf8bc64e84384b7f1f" + } + + :param system: docker-compose formatted specification of a distributed system. + :type json: A JSON object structured as: + :param namespace: Namespace. May not be supported by underlying compute fabric. + :type namespace: string + :return: Returns a JSON object including status, message, and result. Result is a dictionary containing details of the creatd object including name, system id (sid), and port mappings for each exposed service. + + """ + pass + def delete (self, guid, namespace="default"): + """ Delete a distributed system. + + Delete all generated artifacts in the underlying system. + + An example response:: + + { + "status": "success", + "result": {}, + "message": "Deleted system 82a9b5dc7d7c40c69ac05e3fb0f4df86" + } + + :param guid: Globally unique identifier of the system, as returned by start. + :type guid: string + :param namespace: Namespace. May not be supported by underlying compute fabric. + :type namespace: string + """ + pass + def status (self, guid=None, namespace="default"): + """ Get status on running components of the system. + + Provided with a GUID, return status on matching components. + + Without a GUID, return data on all running components. + + An example response:: + + { + "status": "success", + "result": [{ + "name": "nginx-7703f9cbf8f34caf8bc64e84384b7f1f", + "sid": "7703f9cbf8f34caf8bc64e84384b7f1f", + "port": "30306" + }], + "message": "Get status for system None" + } + + :param guid: GUID returned by start for a system. 
diff --git a/appstore/tycho/conf/tycho.yaml b/appstore/tycho/conf/tycho.yaml
new file mode 100644
index 000000000..37ac4bd0f
--- /dev/null
+++ b/appstore/tycho/conf/tycho.yaml
@@ -0,0 +1,42 @@
+tycho:
+  # Configuration file for Tycho.
+
+  # Configure the orchestrator to use.
+  backplane: kubernetes
+  compute:
+    platform:
+      kube:
+        ip: x # Used for minikube environments.
+    system:
+      # Configure application defaults.
+      defaults:
+        securityContext:
+          uid: 1000
+          gid: 1000
+        services:
+          init:
+            resources:
+              cpus: 250m
+              memory: 250Mi
+          deploy:
+            resources:
+              cpus: 0.2
+              memory: 50MB
+        volumes:
+          - pvc://stdnfs_pvc:parent_dir/shared_dir
+          - pvc://stdnfs_pvc/shared_dir:parent_dir/shared_dir
+          - pvc://stdnfs_pvc/username:parent_dir/subpath_dir
+
+  policy:
+    network:
+      ingress:
+        # If supplied, restrict ingress by CIDR.
+        restrict_by_client_cidr: true
+      egress:
+        mode: none
+
+  templates:
+    # A list of alternate directories to search for templates.
+    # see tycho/templates for a list of templates that can be overridden.
+    # Place additional template search paths here.
+    paths: []
\ No newline at end of file
diff --git a/appstore/tycho/config.py b/appstore/tycho/config.py
new file mode 100644
index 000000000..f53091de8
--- /dev/null
+++ b/appstore/tycho/config.py
@@ -0,0 +1,47 @@
+import ipaddress
+import json
+import logging
+import os
+import yaml
+import traceback
+import re
+from tycho.tycho_utils import Resource
+
+logger = logging.getLogger (__name__)
+
+class Config(dict):
+    """ Handle configuration for the system. """
+    def __init__(self, config="conf/tycho.yaml"):
+        """ Load the system configuration. """
+        if isinstance(config, str):
+            config_path = Resource.get_resource_path (config)
+            logger.debug (f"loading config: {config_path}")
+            with open(config_path, 'r') as f:
+                self.conf = yaml.safe_load (f)
+        elif isinstance(config, dict):
+            self.conf = config
+        else:
+            raise ValueError
+
+        """ Determine if we're on minikube. If so, share its ip address via
+        the config. 
""" + #logger.debug (f"loaded config: {json.dumps(self.conf,indent=2)}") + if 'TYCHO_ON_MINIKUBE' in os.environ: + ip = os.popen('minikube ip').read().strip () + if len(ip) > 0: + try: + ipaddress.ip_address (ip) + logger.info (f"configuring minikube ip: {ip}") + self.conf['tycho']['compute']['platform']['kube']['ip'] = ip + except ValueError as e: + logger.error ("unable to get minikube ip address") + traceback.print_exc() + + def __setitem__(self, key, val): + self.conf.__setitem__(key, val) + def __str__(self): + return self.conf.__str__() + def __getitem__(self, key): + return self.conf.__getitem__(key) + def get (self, key, default=None): + return self.conf.get(key, default) \ No newline at end of file diff --git a/appstore/tycho/context.py b/appstore/tycho/context.py new file mode 100644 index 000000000..41c863a8f --- /dev/null +++ b/appstore/tycho/context.py @@ -0,0 +1,437 @@ +import json +import logging +import os +import traceback +import uuid +import yaml +import copy +from deepmerge import Merger +from requests_cache import CachedSession +from string import Template +from jinja2 import Template as jinja2Template +from tycho.client import TychoStatus, TychoSystem, TychoClient +from tycho.exceptions import ContextException + +from urllib.parse import urljoin + +logger = logging.getLogger (__name__) + +mixin_merge = Merger( + [ (list,["override"]), (dict,["merge"]), (set,["union"]) ], + ["override"], + ["override"] +) + +class Principal: + """ Abstract representation of a system identity. """ + def __init__(self, username, a_token=None, r_token=None): + self.username=username + self.access_token=a_token + self.refresh_token=r_token + +class TychoContext: + """ + Load, understand, and use the app registry. + + The app registry is a declarative metadata repository outlining apps available to + the platform. Its YAML definition structure provides + * Basic metadata about the registry itself including identifier, version, name, etc. + * A list of repositories or locations apps might reference for further metadata. + * + """ + + """ https://github.com/heliumdatacommons/CommonsShare_AppStore/blob/master/CS_AppsStore/cloudtop_imagej/deployment.py """ + def __init__(self, registry_config="app-registry.yaml", app_defaults_config="app-defaults.yaml", product="common", tycho_config_url="", stub=False): + # Make sure tycho_config_url ends with "/" or suffix is removed by urljoin. + if tycho_config_url != "": + tycho_config_url += "/" if not tycho_config_url.endswith("/") else "" + self.tycho_config_url = tycho_config_url + logger.info (f"-- TychoContext.__init__: registry_config: {registry_config} | app_defaults_config: {app_defaults_config} | product: {product} | tycho_config_url: {self.tycho_config_url} | stub: {stub}") + self.http_session = CachedSession (cache_name='tycho-registry') + self.registry = self._get_config(registry_config) + self.app_defaults = self._get_config(app_defaults_config) + self.log_dict(self.app_defaults, pre_dict_message="defaults = \n") + self.log_dict(self.registry, pre_dict_message="registry = \n") + """ Uncomment this and related lines when this code goes live,. + Use a timeout on the API so the unit tests are not slowed down. """ + if not os.environ.get ('DEV_PHASE') == 'stub': + self.client=TychoClient(url=os.environ.get('TYCHO_URL', "http://localhost:5000")) + self.product = product + self.apps = self._grok () + + def _get_config(self, file_name): + """ Load the registry metadata. 
""" + logger.info (f"-- loading config:\n file_name: {file_name}\ntycho_config_url: {self.tycho_config_url}") + config = {} + if self.tycho_config_url == "": + """ Load it from the Tycho conf directory for now. Perhaps more dynamic in the future. """ + config_path = os.path.join ( + os.path.dirname (__file__), + "conf", + file_name) + with open(config_path, 'r') as stream: + config = yaml.safe_load (stream) + else: + try: + app_registry_url = urljoin(self.tycho_config_url, file_name) + logger.debug (f"-- downloading {app_registry_url}") + response = self.http_session.get(app_registry_url) + if response.status_code != 200: + raise ValueError(f"-- failed to download: {response.status_code}") + else: + config = yaml.safe_load (response.text) + except Exception as e: + logger.error (f"-- URL: {app_registry_url}\nerror: {e}") + logger.debug ("", exc_info=True) + return config + + def log_dict(self, dict, pre_dict_message="", level=logging.DEBUG): + message = pre_dict_message + json.dumps(dict, sort_keys=True, indent=4) + logger.log(level, message) + + def add_conf_impl(self, apps, context): + for key, value in context.items(): + if key in apps.keys(): + apps[key] = {**apps[key], **value} + self.add_conf_impl(apps, value) + return apps + + def inherit (self, contexts, context, apps={}): + for base in context.get ("extends", []): + self.inherit (contexts, contexts[base], apps) + apps.update (copy.deepcopy(context.get ("apps", {}))) + return apps + + def mixin_defaults(self,apps): + for app in apps: + apps[app] = mixin_merge.merge(copy.deepcopy(apps[app]),copy.deepcopy(self.app_defaults)) + + def mixin(self,contexts,context,apps): + for base in context.get ("extends", []): + self.mixin(contexts,contexts[base],apps) + for mixer in context.get("mixin", []): + for app in apps: + if contexts.get(mixer,None) != None and contexts[mixer].get("apps",None) != None and contexts[mixer]["apps"].get(app,None) != None: + logger.info("mixing " + app) + apps[app] = mixin_merge.merge(copy.deepcopy(apps[app]),copy.deepcopy(contexts[mixer]["apps"].get(app))) + return apps + + def _grok (self): + """ Compile the registry, resolving text substituations, etc. """ + apps = {} + contexts = self.registry.get ('contexts', {}) + if not self.product in contexts: + raise ContextException (f"undefined product {self.product} not found in contexts.") + logger.info (f"-- load-context: id:{self.product}") + ''' + context = contexts[self.product] + apps = context.get ('apps', {}) + """ Resolve context inheritance. """ + for base_name in context.get ('extends', []): + if not base_name in contexts: + raise ContextException (f"base {base_name} of context {self.product} not found in registry.") + logger.debug (f"resolving inheritance of base {base_name} by context {self.product}") + apps.update (contexts[base_name].get('apps')) + new_apps = contexts[base_name].get ('apps', {}) + new_apps.update (apps) + apps = new_apps + ''' + context = contexts[self.product] + logger.debug (f"---------------> {context}") + apps = self.inherit (contexts=contexts, context=context) + self.mixin_defaults(apps) + self.mixin(contexts,context,apps) + """ Load the repository map to enable string interpolation. """ + repository_map = { + key : value['url'] + for key, value in self.registry.get ('repositories', {}).items () + } + """ Compile URLs to resolve repository variables. 
""" + for name, app in apps.items (): #context.get('apps',{}).items (): + if not 'spec' in app: + repos = list(repository_map.items()) + if len(repos) == 0: + raise ValueError ("No spec URL and no repositories specified.") + repo_url = repos[0][1] + if not repo_url.startswith("http"): + # Assume it is a directory within the same repo as the app registry file. + if self.tycho_config_url == "": + logging.error("tycho_config_url is empty string") + raise ValueError(f"-- tycho_config_url is empty string, can't load app registry file") + repo_url = urljoin(self.tycho_config_url, repo_url) + # ToDo: Remove the next four lines if we deprecate DOCKSTORE_APPS_BRANCH. + dockstore_branch = os.environ.get("DOCKSTORE_APPS_BRANCH", "") + external_tycho_app_registry_enabled = os.environ.get("EXTERNAL_TYCHO_APP_REGISTRY_ENABLED", "") + if external_tycho_app_registry_enabled == "false" and dockstore_branch != "": + repo_url = repo_url.replace("master", dockstore_branch) + app['spec'] = f"{repo_url}/{name}/docker-compose.yaml" + spec_url = app['spec'] + app['icon'] = os.path.join (os.path.dirname (spec_url), "icon.png") + for key in [ 'spec', 'icon', 'docs' ]: + url = app[key] + app[key] = Template(url).safe_substitute (repository_map) + logger.debug (f"-- spec: {app['spec']}") + logger.debug (f"-- icon: {app['icon']}") + logger.debug (f"-- product {self.product} resolution => apps: {apps.keys()}") + apps = self.add_conf_impl(apps, context) + for app, value in apps.items(): + logger.debug(f"TychoContext._grok -\napp: {app}\nvalue: {value}") + return apps + + def get_definition(self, app_id): + """ Get the apps source definition""" + app_definition = self.apps[app_id].get('definition') + if not app_definition: + try: + logger.debug (f"-- resolving definition for {app_id}") + url = self.apps[app_id]['spec'] + response = self.http_session.get(url) + if response.status_code != 200: + raise ValueError(f"-- app {app_id}. failed to parse spec. code:{response.status_code}") + template_dict = yaml.safe_load (response.text) + context = self.registry["settings"] + logger.debug (f"-----> context: {context}") + spec_template = str (template_dict) + logger.debug (f"-----> spec_template:\n{spec_template}") + template = jinja2Template (spec_template) + try: + app_def_str = template.render(**context) + logger.debug (f"-----> Rendered app definition:\n{app_def_str}") + + # Validate safety of rendered definition/convert back to dict before storing + app_definition = yaml.safe_load(app_def_str) + self.log_dict(app_definition, pre_dict_message="app_definition = \n") + self.apps[app_id]['definition'] = app_definition + except Exception as e: + logger.error (f"-- app {app_id} failed to render app definition.\nError: {e}") + logger.debug ("", exc_info=True) + logger.warning (f"-- Setting app {app_id} app definition to empty dict") + app_definition = {} + self.apps[app_id]['definition'] = app_definition + except Exception as e: + logger.error (f"-- app {app_id}. failed to parse definition.\nstatus code:{response.status_code}\nerror: {e}") + logger.debug ("", exc_info=True) + return app_definition + + def get_spec (self, app_id): + """ Get the URL of the system docker-compose yaml specification. """ + spec = self.apps[app_id].get ('spec_obj', None) + if not spec: + url = None + response = None + try: + logger.debug (f"-- resolving specification for app: {app_id}") + url = self.apps[app_id]['spec'] + response = self.http_session.get (url) + if response.status_code != 200: + raise ValueError (f"-- app {app_id}. failed to parse spec. 
code:{response.status_code}") + template_dict = yaml.safe_load (response.text) + context = self.registry["settings"] + logger.debug (f"-----> context: {context}") + spec_template = str (template_dict) + logger.debug (f"-----> spec_template: {spec_template}") + template = jinja2Template (spec_template) + try: + spec_str = template.render(**context) + logger.debug (f"-----> rendered spec:\n{spec_str}") + + # Validate spec/convert spec to a dict before storing + spec = yaml.safe_load (spec_str) + self.apps[app_id]['spec_obj'] = spec + except Exception as e: + traceback.print_exc () + logger.error (f"-- app {app_id}.\n Failed to render spec.\nError: {e}") + except Exception as e: + traceback.print_exc () + if response: + logger.error (f"-- app {app_id}. failed to parse spec. code:{response.status_code}") + else: + logger.error (f"-- app {app_id}. failed to parse spec.") + raise e + return spec + + def get_env_registry(self, app_id, settings): + """ Get the env variables specified for an app in the registry and update settings""" + env = self.apps[app_id].get('env', None) + if env: + settings.update(env) + return settings + + def get_settings (self, app_id): + """ Get the URL of the .env settings / environment file. """ + env = self.apps[app_id].get ('env_obj', None) + if not env: + url = self.apps[app_id]['spec'] + env_url = os.path.join (os.path.dirname (url), ".env") + logger.debug (f"-- resolving settings for app: {app_id}") + response = self.http_session.get (env_url) + if response.status_code == 200: + logger.debug (f"-- got settings for {app_id}") + env = response.text + else: + logger.debug (f"-- using empty settings for {app_id}") + env = "" + self.apps[app_id]['env_obj'] = env + return env + + def status (self, request): + return self.client.status (request) + + def delete (self, request): + return self.client.delete (request) + + def update(self, request): + return self.client.patch(request) + + def start (self, principal, app_id, resource_request, host): + """ Get application metadata, docker-compose structure, settings, and compose API request. """ + logger.info(f"\nprincipal: {principal}\napp_id: {app_id}\n" + f"resource_request: {resource_request}\nhost: {host}") + spec = self.get_spec (app_id) + logger.debug(f"context.start - \nspec: {spec}") + settings = self.client.parse_env (self.get_settings (app_id)) + settings_all = self.get_env_registry(app_id, settings) + services = self.apps[app_id]['services'] + services = { k : { + "port" : str(v), + "clients" : [] + } for k, v in services.items () + } + logger.debug (f"parsed {app_id}\nsettings: {settings}\nsettings_all: {settings_all}") + """ Use a pre-existing k8s service account """ + service_account = self.apps[app_id]['serviceAccount'] if 'serviceAccount' in self.apps[app_id].keys() else None + """ Add entity's auth information """ + principal_params = {"username": principal.username, "access_token": principal.access_token, "refresh_token": principal.refresh_token, "host": host} + principal_params_json = json.dumps(principal_params, indent=4) + """ Security Context that are set for the app """ + spec["security_context"] = self.apps[app_id]["securityContext"] if 'securityContext' in self.apps[app_id].keys() else {} + spec["services"][app_id]["ext"] = self.apps[app_id]["ext"] if 'ext' in self.apps[app_id].keys() else None + + # If ephemeralStorage is set in the app's docker-compose.yaml then + # update the resources with the limits/reservations. 
We might want + # to give the user the ability to set these in the UI, if so then + # remove these few lines after doing so. + spec_limits_keys = spec['services'][app_id]['deploy']['resources']['limits'].keys() + if "ephemeralStorage" in spec_limits_keys: + dc_limits_es = spec['services'][app_id]['deploy']['resources']['limits']['ephemeralStorage'] + resource_request['deploy']['resources']['limits']['ephemeralStorage'] = dc_limits_es + spec_reservations_keys = spec['services'][app_id]['deploy']['resources']['reservations'].keys() + if "ephemeralStorage" in spec_reservations_keys: + dc_reservations_es = spec['services'][app_id]['deploy']['resources']['reservations']['ephemeralStorage'] + resource_request['deploy']['resources']['reservations']['ephemeralStorage'] = dc_reservations_es + + spec["services"][app_id].update(resource_request) + """ Certain apps might require appending a string to the custom URL. """ + conn_string = self.apps.get(app_id).get("conn_string", "") + spec["services"][app_id]["conn_string"] = conn_string + """ Add a proxy rewrite rule """ + proxy_rewrite_rule = self.apps.get(app_id).get("proxy-rewrite-rule", False) + proxy_rewrite = self.apps.get(app_id).get("proxy-rewrite", { "enabled":False, "target":None }) + spec["services"][app_id]["proxy_rewrite"] = proxy_rewrite + if proxy_rewrite_rule: spec["services"][app_id]["proxy_rewrite"]["enabled"] = True + """ Add gitea integration rule """ + gitea_integration = self.apps.get(app_id).get("gitea-integration", False) + spec["services"][app_id]["gitea_integration"] = gitea_integration + + if spec is not None: + system = self._start ({ + "name" : app_id, + "serviceaccount": service_account, + "env" : settings_all, + "system" : spec, + "principal" : principal_params_json, + "services" : services + }) + """ Validate resulting interfaces. """ + """ + TODO: + 1. Check returned status. + 2. The Ambassador based URL removes the need to pass back a port. Confirm & delete port code. + """ + running = { v.name : v.port for v in system.services } + for name, port in services.items (): + assert name in running, f"Svc {name} expected but {services.keys()} actually running." + logger.info ( + f" -- started app id:{app_id} user:{principal.username} id:{system.identifier} services:{list(running.items ())}") + return system + + def _start (self, request): + """ + Control low level application launching (start) logic. + Also provides an anchor point to mock the service in unit tests. + """ + return self.client.start (request) + +class NullContext (TychoContext): + """ + A null context to facilitate client development. + """ + def __init__(self,product="common"): + super ().__init__(product=product,stub=True) + + def status(self, request=None): + """ Make up some rows. """ + identifier = uuid.uuid4 () + return TychoStatus (**{ + "status" : "success", + "result" : [ + { + "name" : f"jupyter-ds-{str(identifier)}", + "app_id" : "jupyter-ds", + "sid" : str(identifier), + "ip_address" : 'x.y.z.m', + "port" : "8080", + "creation_time" : "time" + } for x in range(8000, 8005) + ], + "message" : "..." + }) + + def delete (self, request): + """ Ingore deletes. 
""" + logger.debug (f"-- delete: {request}") + + def start (self, principal, app_id): + logger.debug (f"-- start: {principal} {app_id}") + spec = self.get_spec (app_id) + #settings = self.client.parse_env (self.get_settings (app_id)) + settings = self._parse_env (self.get_settings (app_id)) + services = self.apps[app_id]['services'] + return TychoSystem (**{ + "status" : "ok", + "result" : { + "name" : self.apps[app_id]['name'], + "sid" : uuid.uuid4 (), + "containers" : { + k : { 'ip_address' : 'x.y.z', 'port-1' : v } + for k, v in services.items () + } + }, + "message" : "mock: testing..." + }) + +class ContextFactory: + """ Flexible method for connecting to a TychoContext. + Also, provide the null context for easy dev testing in appstore. """ + _state = {} + def __init__(self): + self.__dict__ = self._state + if hasattr(self, 'contexts'): + logger.debug("ContextFactory.__init__: contexts attribute exists") + else: + logger.debug("ContextFactory.__init__: creating contexts dictionary") + self.contexts = {} + def get (self, product, registry_config="app-registry.yaml", app_defaults_config="app-defaults.yaml", context_type="null", tycho_config_url=""): + logger.info (f"-- ContextFactory.get: registry_config: {registry_config} | app_defaults_config: {app_defaults_config} | product: {product} | tycho_config_url: {tycho_config_url} | context_type: {context_type}") + if context_type in self.contexts: + logger.debug(f"ContextFactory.get: returning existing context for {context_type}") + returnContext = self.contexts[context_type] + else: + logger.debug(f"ContextFactory.get: creating context for {context_type}") + if context_type == "null": + self.contexts[context_type] = NullContext(product=product) + returnContext = self.contexts[context_type] + elif context_type == "live": + self.contexts[context_type] = TychoContext(registry_config=registry_config, app_defaults_config=app_defaults_config, product=product, tycho_config_url=tycho_config_url, stub=False) + returnContext = self.contexts[context_type] + return returnContext \ No newline at end of file diff --git a/appstore/tycho/core.py b/appstore/tycho/core.py new file mode 100644 index 000000000..d3575b8f9 --- /dev/null +++ b/appstore/tycho/core.py @@ -0,0 +1,83 @@ +from tycho.config import Config +from tycho.factory import ComputeFactory +from tycho.factory import supported_backplanes +from tycho.model import System, ModifySystem + +class Tycho: + """ An organizing abstraction for the Tycho system. + + Tycho adds a layer of system architecture and policy support to + cloud native container orchestration platforms. It's true you can do just + about anything with the Kubernetes API. Tycho let's teams design, decide, + automate, test, and enforce what should be done. + + """ + + def __init__(self, + backplane="kubernetes", + config="conf/tycho.yaml"): + """ Construct a Tycho component. + + :param backplane: The name of the compute back end. Analogous to a compiler's + code emitter, the backplane's concern is to project a system + abstract syntax tree (AST) into a compute fabric specific + structure. + :param config: A configuration file for the system. + """ + self.backplane = backplane + self.config = Config (config) + self.compute = ComputeFactory.create_compute (self.config) + + def get_compute (self): + """ Get the Tycho API for the compute fabric. + + :returns: A compute fabric code emitter implementation specified to the constructor. 
+ """ + return self.compute + + def parse (self, request): + """ Parse a request to construct an abstract syntax tree for a system. + + :param request: JSON object formatted to contain name, structure, env, and + service elements. Name is a string. Structue is the JSON + object resulting from loading a docker-compose.yaml. Env + is a JSON dictionary mapping environment variables to + values. These will be substituted into the specification. + Services is a JSON object representing which containers and + ports to expose, and other networking rules. + :returns: `.System` + """ + return System.parse ( + config=self.config, + name=request['name'], + principal=request.get('principal'), + system=request['system'], + service_account=request.get('serviceaccount', 'default'), + env=request.get ('env', {}), + services=request.get ('services', {})) + + def parse_modify(self, request): + """ Parse a request into a class representation of metadata and specs of a system to be modified. + + :param request: JSON object formatted to contain guid, labels, resources. + GUID is a hexadecimal string of UUID representing a system. + Labels is a dictionary of label and label-name as key-value pairs. + Resources is a dictionary of cpu and memory keys, with corresponding values. + Can optionally pass a config. + :returns: An instance of `tycho.model.ModifySystem` + """ + return ModifySystem.parse_modify( + config=self.config, + guid=request.get("tycho-guid", None), + labels=request.get("labels", {}), + cpu=request.get("cpu", None), + memory=request.get("memory", None)) + + @staticmethod + def is_valid_backplane (backplane): + """ Determine if the argument is a valid backplane. """ + return ComputeFactory.is_valid_backplane (backplane) + + @staticmethod + def supported_backplanes (): + return list(supported_backplanes) \ No newline at end of file diff --git a/appstore/tycho/dockerc.py b/appstore/tycho/dockerc.py new file mode 100644 index 000000000..4d7771828 --- /dev/null +++ b/appstore/tycho/dockerc.py @@ -0,0 +1,154 @@ +import json +import logging +import os +import random +import shutil +import glob +import subprocess +import sys +import threading +import traceback +import yaml +from tycho.compute import Compute +from tycho.model import System +from tycho.tycho_utils import TemplateUtils +from tycho.exceptions import DeleteException +from tycho.exceptions import StartException +from compose.cli.main import TopLevelCommand, project_from_options + +logger = logging.getLogger (__name__) + +class DockerComposeThread(threading.Thread): + """ Run Docker-Compose in a thread and communicate via subprocess. """ + + def __init__(self, system, port, configured, app_root): + """ Invoke thread init and connect the system. """ + threading.Thread.__init__(self) + self.system = system + self.port = port + self.container_map = {} + self.configured = configured + self.app_root = app_root + + def run (self): + """ Execute the system. """ + logger.debug (f"creating compose app: {self.system.identifier}") + os.makedirs (self.app_root) + #docker_compose_file = os.path.join (self.app_root, f"docker-compose.yaml") + docker_compose_file = os.path.join (self.app_root, f"{self.system.name}.yaml") + env_file = os.path.join (self.app_root, ".env") + + """ For now, write literal input text. TODO, generate to incoporate policy. 
""" + with open (docker_compose_file, 'w') as stream: + stream.write (self.system.source_text) + env = f"""HOST_PORT={self.port}\nLOCAL_STORE=./\n""" + print (f"--env----------> {env}") + with open (env_file, 'w') as stream: + stream.write (env) + + """ Find and return ports for each container. """ + config = yaml.safe_load (TemplateUtils.apply_environment ( + env, + self.system.source_text)) + logger.debug (f"Building conainer map for system {self.system.name}") + for c_name, c_config in config.get ('services', {}).items (): + print (f"--cname:{c_name} c_config:{c_config}") + self.container_map[c_name] = { + f"{c_name}-{i}" : port.split(':')[0] if ':' in port else port + for i, port in enumerate(c_config.get('ports', [])) + } + print (f"-- container map {self.container_map}") + + self.configured.set () + + # Run docker-compose in the directory. + logger.debug (f"Garbage collecting unused docker networks...") + p = subprocess.Popen( + [ "docker", "network", "prune", "--force" ], + stdout=subprocess.PIPE, + cwd=self.app_root) + + logger.debug (f"Running system {self.system.name} in docker-compose") + command = f"docker-compose --project-name {self.system.name} -f {self.system.name}.yaml up --detach" + print (command) + p = subprocess.Popen( + command.split (), + stdout=subprocess.PIPE, + cwd=self.app_root) + +class DockerComposeCompute(Compute): + def __init__(self, config): + self.config = config + self.app_root_base = "apps" + def start (self, system, namespace="default"): + import subprocess + """ Generate a globally unique identifier for the application. All associated + objects will share this identifier. """ + + app_root = os.path.join (self.app_root_base, system.identifier) + + """ Generate a unique port for the system. Needs to be generalized to multi-container + while somehow preserving locally meaningful port names.""" + port = random.randint (30000, 40000) + configured = threading.Event() + docker_compose_thread = DockerComposeThread (system, port, configured, app_root) + docker_compose_thread.start () + """ Wait for the thread to configure the app to run. + If this takes longer than five seconds, the thread has probably errored. Continue. """ + configured.wait (5) + return { + 'name' : system.name, + 'sid' : system.identifier, + 'containers' : docker_compose_thread.container_map + } + + def status (self, name, namespace="default"): + """ Report status of running systems. """ + print (os.getcwd ()) + apps = [ guid for guid in os.listdir(self.app_root_base) + if os.path.isdir(os.path.join(self.app_root_base, guid)) ] + result = [] + cur_dir = os.getcwd () + for app in apps: + app_root = os.path.join (self.app_root_base, app) + + command = f"docker-compose --project-name {app} ps".split () + logger.debug (f"--command: {command}") + p = subprocess.Popen( + command, + stdout=subprocess.PIPE, + cwd=app_root) + + p.stdout.readline () + for line in p.stdout: + print (line) + + result.append ({ + "name" : "--", + "sid" : app, + #"ip" : None, #"--", #ip_address, + "port" : "--" + }) + return result + + def delete (self, name, namespace="default"): + """ Delete the running process and persistent artifacts. 
""" + app_root = os.path.join (self.app_root_base, name) + + pattern = os.path.join (app_root, f"*{name}*.yaml") + print (pattern) + docker_compose_file = glob.glob (pattern) + print (docker_compose_file) + docker_compose_file = os.path.basename (docker_compose_file[0]) + project_name = docker_compose_file.replace (".yaml", "") + print (f"--project name: {project_name}") + + command = f"docker-compose --project-name {project_name} -f {docker_compose_file} down".split () + """ Wait for shutdown to complete. """ + p = subprocess.check_call( + command, + stdout=subprocess.PIPE, + cwd=app_root) + + """ Delete the app subtree. """ + shutil.rmtree (app_root) \ No newline at end of file diff --git a/appstore/tycho/exceptions.py b/appstore/tycho/exceptions.py new file mode 100644 index 000000000..3ef387922 --- /dev/null +++ b/appstore/tycho/exceptions.py @@ -0,0 +1,24 @@ +class TychoException(Exception): + def __init__(self, message, details=""): + super().__init__(message) + self.details = details + + +class StartException(Exception): + def __init__(self, message, details=""): + super().__init__(message, details) + + +class DeleteException(Exception): + def __init__(self, message, details=""): + super().__init__(message, details) + + +class ContextException(TychoException): + def __init__(self, message, details=""): + super().__init__(message, details) + + +class ModifyException(Exception): + def __init__(self, message, details=""): + super().__init__(message, details) \ No newline at end of file diff --git a/appstore/tycho/factory.py b/appstore/tycho/factory.py new file mode 100644 index 000000000..07bf5609a --- /dev/null +++ b/appstore/tycho/factory.py @@ -0,0 +1,30 @@ +import json +import logging +from tycho.kube import KubernetesCompute +from tycho.dockerc import DockerComposeCompute + +logger = logging.getLogger (__name__) + +config = { + "backplane" : "kubernetes" +} +config_factory = { + "kubernetes" : KubernetesCompute, + "docker-compose" : DockerComposeCompute +} +supported_backplanes = config_factory.keys () + +class ComputeFactory: + + @staticmethod + def is_valid_backplane (backplane): + return backplane in supported_backplanes + + @staticmethod + def set_backplane (backplane): + config['backplane'] = backplane + + @staticmethod + def create_compute (config): + backplane = config['tycho']['backplane'] + return config_factory[backplane](config=config) diff --git a/appstore/tycho/kube.py b/appstore/tycho/kube.py new file mode 100644 index 000000000..ff9f3fea6 --- /dev/null +++ b/appstore/tycho/kube.py @@ -0,0 +1,528 @@ +import json +import logging +import os +import shutil +import sys +import traceback +import base64 +from time import sleep +from kubernetes import client as k8s_client, config as k8s_config +from tycho.compute import Compute +from tycho.exceptions import DeleteException +from tycho.exceptions import StartException +from tycho.exceptions import TychoException +from tycho.exceptions import ModifyException +from tycho.model import System +from tycho.tycho_utils import TemplateUtils +import kubernetes.client +from kubernetes.client.rest import ApiException + +logger = logging.getLogger (__name__) + +port_forwards = {} + + +class KubernetesCompute(Compute): + """ A Kubernetes orchestrator implementation. + + Tested with Minikube and Google Kubernetes Engine. + """ + + def __init__(self, config): + """ Initialize connection to Kubernetes. + + Connects to Kubernetes configuration using an environment appropriate method. 
+ """ + super(KubernetesCompute, self).__init__() + self.config = config + if os.getenv('KUBERNETES_SERVICE_HOST'): + """ We're running inside K8S. Load the config appropriately. """ + k8s_config.load_incluster_config() + else: + """ We're running outside of K8S. Load the config. """ + k8s_config.load_kube_config() + api_client = k8s_client.ApiClient() + self.api = k8s_client.CoreV1Api(api_client) + self.rbac_api = k8s_client.RbacAuthorizationV1Api(api_client) +# self.extensions_api = k8s_client.ExtensionsV1beta1Api(api_client) + self.extensions_api = k8s_client.AppsV1Api(api_client) + self.networking_api = k8s_client.NetworkingV1Api(api_client) + self.try_minikube = True + self.namespace = self.get_namespace ( + namespace=os.environ.get("NAMESPACE", self.get_namespace ())) + logger.debug (f"-- using namespace: {self.namespace}") + + def get_namespace(self, namespace="default"): + try: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as secrets: + for line in secrets: + namespace = line + break + except Exception as e: + logger.debug(f"-- downward api namespace lookup failed.") + return namespace + + def check_volumes(self, volumes, namespace): + try: + volumesNA = [] + api_response = self.api.list_namespaced_persistent_volume_claim(namespace=namespace) + for index, volume in enumerate(volumes): + notExists = True + if volume["volume_name"] != "stdnfs": + for item in api_response.items: + if item.metadata.name != volume["volume_name"]: + continue + else: + notExists = False + logger.info(f"PVC {volume['volume_name']} exists.") + break + if notExists and volume["volume_name"] != 'stdnfs': + volumesNA.append(index) + #raise Exception(f"Cannot create system. PVC {volume['pvc_name']} does not exist. Create it.") + return volumesNA + except Exception as e: + logger.debug(f"Raising persistent volume claim exception. {e}") + #raise + + def is_ambassador_context(self, namespace, ambassador_service_name): + try: + field_sel_api_response = self.api.list_namespaced_service(field_selector=f"metadata.name={ambassador_service_name}", namespace=namespace) + return len(field_sel_api_response.items) == 1 + except ApiException as e: + logger.info(f"There was a problem assessing whether the ambassador service is running.", e) + + def start (self, system, namespace="default"): + """ Start an abstractly described distributed system on the cluster. + Generate each required K8s artifact and wire them together. Currently + explicitly modeled elements include Deployment, PersistentVolume, + PersistentVolumeClaim, and Service components. + + :param system: An abstract system model. + :type system: :class:`.System` + :param namespace: Namespace to run the system in. + :type namespace: str + """ + namespace = self.namespace #system.get_namespace() + try: + """ Check volumes and remove them from the system. """ + volumesNA = self.check_volumes(system.volumes, namespace) + systemVolumesCopy = [] + for index, value in enumerate(system.volumes): + if index not in volumesNA: + systemVolumesCopy.append(value) + system.volumes = systemVolumesCopy + """ Check the status of ambassador """ + amb_status = self.is_ambassador_context(namespace, system.ambassador_service_name) + if amb_status: + system.amb = True + #api_response = self.api.list_namespace() + #notExists = True + #for item in api_response.items: + # link = item.metadata.self_link + # app_ns = link.split("/")[-1] + # if app_ns == system.system_name: + # notExists = False + # logger.info(f"Namespace {system.system_name} exists. 
Skipping create.") + # break + #if notExists: + # ns_manifests = system.render(template="namespace.yaml") + # for ns_manifest in ns_manifests: + # logger.info(f"Namespace {system.system_name} created.") + # api_response = self.api.create_namespace(body=ns_manifest) + + try: + api_response = self.api.list_namespaced_secret(namespace=namespace) + for item in api_response.items: + if item.metadata.name == f"{system.system_name}-env": + for key, value in item.data.items(): + value = str(base64.b64decode(value), 'utf-8') + for container in system.containers: + container.env.append([key,TemplateUtils.render_string(value,container.env)]) + break + except ApiException as e: + logger.debug(f"App requires {system.system_name}-env configmap with envs: {e}") + ## TODO: Swallows exception. + + try: + for container in system.containers: + if container.name == system.system_name: + for port in container.ports: + system.system_port = port['containerPort'] + break + break + except Exception as e: + traceback.print_exc() + exc_type, exc_value, exc_traceback = sys.exc_info() + text = traceback.format_exception( + exc_type, exc_value, exc_traceback) + raise TychoException ( + message=f"Failed to get system port:", + details=text) + ## TODO: Why not catch at the end? + + """ Turn an abstract system model into a cluster specific representation. """ + pod_manifests = system.render ("pod.yaml") + #return {} + #""" Render a persistent volume claim. """ + #pvc_manifests = system.render(template="pvc.yaml") + #""" Create persistent volume claims. """ + #for pvc_manifest in pvc_manifests: + # if pvc_manifest["metadata"]["name"] != "nfs": + # response = self.api.create_namespaced_persistent_volume_claim( + # namespace=namespace, + # body=pvc_manifest) + #""" Render persistent volumes. """ + #pv_manifests = system.render(template="pv.yaml") + #""" Create the persistent volumes. """ + #for pv_manifest in pv_manifests: + # response = self.api.create_persistent_volume( + # body=pv_manifest) + + """ Create a deployment for the pod. """ + for pod_manifest in pod_manifests: + deployment,create_deployment_api_response = self.pod_to_deployment ( + name=system.name, + username=system.username, + identifier=system.identifier, + template=pod_manifest, + namespace=namespace) + + """ Create a network policy if appropriate. """ + if system.requires_network_policy (): + logger.debug ("creating network policy") + network_policy_manifests = system.render ( + template="policy/tycho-default-netpolicy.yaml") + for network_policy_manifest in network_policy_manifests: + logger.debug (f"applying network policy: {network_policy_manifest}") + network_policy = self.networking_api.create_namespaced_network_policy ( + body=network_policy_manifest, + namespace=namespace) + + """ Create service endpoints. """ + container_map = {} + counter = 0 + for container in system.containers: + """ Determine if a service is configured for this container. 
""" + service = system.services.get (container.name, None) + if service: + logger.debug (f"generating service for container {container.name}") + service_manifests = system.render ( + template = "service.yaml", + context = { "service" : service, "create_deployment_api_response":create_deployment_api_response } + ) + for service_manifest in service_manifests: + logger.debug (f"-- creating service for container {container.name}") + response = self.api.create_namespaced_service( + body=service_manifest, + namespace=namespace) + + ip_address = None + if not system.amb: + ip_address = self.get_service_ip_address (response) + + """ Return generated node ports to caller. """ + for port in response.spec.ports: + container_map[container.name] = { + "ip_address" : ip_address, + port.name : port.node_port + } + break + result = { + 'name' : system.name, + 'sid' : system.identifier, + 'containers' : container_map, + 'conn_string': system.conn_string + } + + except Exception as e: + self.delete (system.name) + exc_type, exc_value, exc_traceback = sys.exc_info() + text = traceback.format_exception( + exc_type, exc_value, exc_traceback) + raise StartException ( + message=f"Unable to start system: {system.name}", + details=text) + + logger.info (f"result of the app launch: {json.dumps(result,indent=2)}") + return result + + def get_service_ip_address (self, service_metadata): + """ Get the IP address for a service. On a system with a load balancer + that will be in the service status' load balancer section. On minikube, + we use the minikube IP address which is in the system config. + + + :param service: Service metadata. + :returns: ip_address IP Address of the service. + """ + ip_address = None if os.environ.get("DEV_PHASE", "prod") != "test" else "127.0.0.1" + try: + app_id = service_metadata.metadata.labels["tycho-app"] + logger.info (f"-================================> *** {app_id}") + if not app_id in port_forwards: + port_forwards[app_id] = app_id #process.pid + sleep (3) + logger.debug (f"--------------> {service_metadata.spec.ports}") + port = service_metadata.spec.ports[0].port + node_port = service_metadata.spec.ports[0].node_port + if node_port is None: + node_port = service_metadata.spec.ports[0].target_port + exe = shutil.which ('kubectl') + command = f"{exe} port-forward --pod-running-timeout=3m0s deployment/{app_id} {node_port}:{port}" + logger.debug (f"-- port-forward: {command}") + #ip_address = "127.0.0.1" + except Exception as e: + traceback.print_exc () + logger.debug (f"service {service_metadata.metadata.name} ingress ip: {ip_address}") + return ip_address + + def pod_to_deployment (self, name, username, identifier, template, namespace="default"): + """ Create a deployment specification based on a pod template. + + :param name: Name of the system. + :type name: str + :param template: Relative path to the template to use. + :type template: str + :param identifier: Unique key to this system. + :type identifier: str + :param namepsace: Namespace to run the pod in. 
+ :type namespace: str + """ + namespace = self.namespace #self.get_namespace() + deployment_spec = k8s_client.V1DeploymentSpec( + replicas=1, + template=template, + selector=k8s_client.V1LabelSelector ( + match_labels = { + "tycho-guid" : identifier, + "username" : username + })) + + """ Instantiate the deployment object """ + logger.debug (f"creating deployment specification {template}") + deployment = k8s_client.V1Deployment( + api_version="apps/v1", + kind="Deployment", + metadata=k8s_client.V1ObjectMeta( + name=name, + labels={ + "tycho-guid" : identifier, + "executor" : "tycho", + "username" : username + }), + spec=deployment_spec) + + """ Create the deployment. """ + logger.debug (f"applying deployment {template}") + api_response = self.extensions_api.create_namespaced_deployment( + body=deployment, + namespace=namespace) + logger.debug (f"deployment created. status={api_response.status}") + return deployment,api_response + + def delete (self, name, namespace="default"): + """ Delete the deployment. + + :param name: GUID of the system to delete. + :type name: str + :param namespace: Namespace the system runs in. + :type namespace: str + """ + namespace = self.namespace #self.get_namespace() + try: + """ Treat everything with a namespace parameterized collections based delete + operator the same. """ + finalizers = { + "deployment" : self.extensions_api.delete_collection_namespaced_deployment, + "replica_set" : self.extensions_api.delete_collection_namespaced_replica_set, + "pod" : self.api.delete_collection_namespaced_pod, + "persistentvolumeclaim" : self.api.delete_collection_namespaced_persistent_volume_claim, + #"networkpolicy" : self.networking_api.delete_collection_namespaced_network_policy + } + for object_type, finalizer in finalizers.items (): + logger.debug (f" --deleting {object_type} elements of {name} in namespace {namespace}") + response = finalizer ( + label_selector=f"tycho-guid={name}", + namespace=namespace) + + except ApiException as e: + traceback.print_exc() + exc_type, exc_value, exc_traceback = sys.exc_info() + text = traceback.format_exception( + exc_type, exc_value, exc_traceback) + raise DeleteException ( + message=f"Failed to delete system: {name}", + details=text) + return { + } + + def status (self, name=None, username=None, namespace="default"): + """ Get status. + Without a name, this will get status for all running systems. + With a name, it will get status for the specified system. + + :param name: GUID of a system to get status for. + :type name: str + :param namespace: Namespace the system runs in. + :type namespace: str + """ + namespace = self.namespace + result = [] + + """ Find all our generated deployments. """ + label = f"tycho-guid={name}" if name else f"executor=tycho" + if username: + label = f"username={username}" if username else f"executor=tycho" + logger.debug (f"-- status label: {label}") + response = self.extensions_api.list_namespaced_deployment ( + namespace, + label_selector=label) + + if response is not None: + for item in response.items: + + """ Collect pod metrics for this deployment. 
""" + pod_resources = { + container.name : container.resources.limits + for container in item.spec.template.spec.containers + } + logger.debug(f"-- pod-resources {pod_resources}") + + item_guid = item.metadata.labels.get("tycho-guid", None) + item_username = item.metadata.labels.get("username", None) + + """ Get the creation timestamp""" + c_time = item.metadata.creation_timestamp + time = f"{c_time.month}-{c_time.day}-{c_time.year} {c_time.hour}:{c_time.minute}:{c_time.second}" + + """ Get the workspace name of the pod """ + workspace_name = item.spec.template.metadata.labels.get("app-name", "") + + """ Temporary variables so rest of the code doesn't break elsewhere. """ + ip_address = "127.0.0.1" + port = 80 + + desired_replicas = item.status.replicas + ready_replicas = item.status.ready_replicas + is_ready = ready_replicas == desired_replicas + + result.append( + { + "name": item.metadata.name, + "app_id": item.spec.template.metadata.labels.get('original-app-name', None), + "sid": item_guid, + "ip_address": ip_address, + "port": str(port), + "creation_time": time, + "username": item_username, + "utilization": pod_resources, + "workspace_name": workspace_name, + "is_ready": is_ready + } + ) + + return result + + def modify(self, system_modify): + """ + Returns a list of all patches, + + * metadata labels - Applied to each deployment resource including the pods managed by it. + * container resources - Are applied to each container in the pod managed by a deployment. + + Takes in a handle :class:`tycho.model.ModifySystem` with the following instance variables, + + * config - A default config for Tycho. + * guid - A unique guid to a system/deployment. + * labels - A dictionary of labels. + * resources - A dictionary containing cpu and memory as keys. + * containers - A list of containers the resources are applied to. + + :param system_modify: Spec and Metadata object + :type system_modify: class ModifySystem + + :returns: A list of patches applied + :rtype: A list + + """ + namespace = self.namespace + patches_applied = list() + try: + api_response = self.extensions_api.list_namespaced_deployment( + label_selector=f"tycho-guid={system_modify.guid}", + namespace=namespace).items + + if len(api_response) == 0: + raise Exception("No deployments found. Specify a valid GUID. Format {'guid': ''}.") + + for deployment in api_response: + + system_modify.containers = list() + # Need this step to get a comprehensive list of containers if it's multi container pod. + # Later for patching would need a merge key "name" and corresponding image. 
+ containers = deployment.spec.template.spec.containers + system_modify.containers = containers + + generator = TemplateUtils(config=system_modify.config) + templates = list(generator.render("patch.yaml", context={"system_modify": system_modify})) + patch_template = templates[0] if len(templates) > 0 else {} + patches_applied.append(patch_template) + + _ = self.extensions_api.patch_namespaced_deployment( + name=deployment.metadata.name, + namespace=namespace, + body=patch_template + ) + + return { + "patches": patches_applied + } + + except (IndexError, ApiException, Exception) as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + text = traceback.format_exception(exc_type, exc_value, exc_traceback) + raise ModifyException( + message=f"Failed to modify system: {system_modify.guid}", + details=text + ) + + + +''' +our-pvc: + - dicom images ro-sidecar + - nfsrods for napari and imagej irods + - deepgtex rwm + +Given: +============================================================== +docker-compose.yaml: + ... + volumes: + pvc://deepgtex:/deepgtex:rw RW! + pvc://nfsrods:/nfsrods:rw RW? +-------------------------------------------------------------- +every container +* pvc://stdnfs/home/${username} -> /home/${username} RW +* pvc://stdnfs/data -> /shared R + sidecar? -> /data R + +options + user provides a pvc -> use that as pvc://stdnfs + +------ + +cluster A: + my-pvc is a RWM pvc. + bin/tycho api --docker --pvc my-pvc + +cluster B: + pvc-2 is a RWM pvc + bin/tycho api --docker --pvc pvc-2 + +------ + +clusterA: + my-rwm-storage-class + bin/tycho api --docker --rwm-sc my-rwm-storage-class +''' diff --git a/appstore/tycho/model.py b/appstore/tycho/model.py new file mode 100644 index 000000000..dc1df562b --- /dev/null +++ b/appstore/tycho/model.py @@ -0,0 +1,540 @@ +import logging +import ipaddress +import json +import os +from typing import OrderedDict, Dict, Any +import uuid +import yaml +from tycho.tycho_utils import TemplateUtils + +from cryptography.hazmat.primitives import serialization as crypto_serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.backends import default_backend as crypto_default_backend + +logger = logging.getLogger (__name__) + + +class Limits: + """ Abstraction of resource limits on a container in a system. """ + def __init__(self, + cpus=None, + gpus=None, + memory=None, + ephemeralStorage=None): + """ Create limits. + + :param cpus: Number of CPUs. May be a fraction. + :type cpus: str + :param gpus: Number of GPUs. 
+        :type gpus: str
+        :param memory: Amount of memory
+        :type memory: str
+        :param ephemeralStorage: Amount of ephemeral storage
+        :type ephemeralStorage: str
+        """
+        self.cpus = cpus
+        self.gpus = gpus
+        self.memory = memory
+        self.ephemeralStorage = ephemeralStorage
+    def __repr__(self):
+        return f"cpus:{self.cpus} gpus:{self.gpus} mem:{self.memory} ephemeralStorage:{self.ephemeralStorage}"
+
+
+class Volumes:
+    def __init__(self, id, containers):
+        self.id = id
+        self.containers = containers
+        self.volumes = []
+        self.pvcs = []
+
+    def volume(self, container_name, pvc_name, volume_name, path=None, subpath=None):
+        self.volumes.append({"container_name": container_name, "pvc_name": pvc_name, "volume_name": volume_name, "path": path, "subpath": subpath})
+
+    def process_volumes(self):
+        for index, container in enumerate(self.containers):
+            for index, volume in enumerate(container["volumes"]):
+                parts = volume.split(":")
+                if parts[0] == "pvc":
+                    volume_name = parts[1].split("/")[2:3][0]
+                    pvc_name = volume_name if volume_name not in self.pvcs else None
+                    self.pvcs.append(volume_name)
+                    path = parts[2] if len(parts) == 3 else None
+                    subpath = "/".join(parts[1].split("/")[3:]) if len(parts) == 3 else None
+                    self.volume(container['name'], pvc_name, volume_name, path, subpath)
+                else:
+                    logger.debug(f"Volume definition should follow the pattern: pvc://<volume_name>/<subpath>:<container_path> or pvc://<volume_name>:<container_path>")
+                    raise Exception(f"Wrong Volume definition in Container:{container['name']} and Volume:{volume}")
+        return self.volumes
+
+class Probe:
+    def __init__(self,cmd=None,delay=None,period=None,threshold=None):
+        self.cmd = cmd
+        self.delay = delay
+        self.period = period
+        self.threshold = threshold
+
+class HttpProbe(Probe):
+    def __init__(self,delay=None,period=None,threshold=None,httpGet=None):
+        Probe.__init__(self,None,delay,period,threshold)
+        if httpGet != None:
+            self.path = httpGet.get("path","/")
+            self.port = httpGet.get("port",80)
+            self.httpHeaders = httpGet.get("httpHeaders",None)
+
+class TcpProbe(Probe):
+    def __init__(self,delay=None,period=None,threshold=None,tcpSocket=None):
+        Probe.__init__(self,None,delay,period,threshold)
+        if tcpSocket != None:
+            self.port = tcpSocket.get("port",None)
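Container.__init__ below dispatches probe dictionaries to these classes based on which key is present; for example, a docker-compose style liveness block selects HttpProbe (the values here are invented for illustration):

    liveness_probe = { "httpGet" : { "path" : "/healthz", "port" : 8080 }, "delay" : 5, "period" : 10 }
    if liveness_probe.get ('httpGet', None) != None:
        probe = HttpProbe (**liveness_probe)     # probe.path == "/healthz", probe.port == 8080
    elif liveness_probe.get ('tcpSocket', None) != None:
        probe = TcpProbe (**liveness_probe)
    else:
        probe = Probe (**liveness_probe)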
+class Container:
+    """ Invocation of an image in a specific infrastructural context. """
+    def __init__(self,
+                 name,
+                 image,
+                 command=None,
+                 env=None,
+                 identity=None,
+                 limits=None,
+                 requests=None,
+                 ports=[],
+                 expose=[],
+                 depends_on=None,
+                 volumes=None,
+                 liveness_probe=None,
+                 readiness_probe=None):
+        """ Construct a container.
+
+        :param name: Name the running container will be given.
+        :param image: Name of the image to use.
+        :param command: Text of the command to run.
+        :param env: Environment settings
+        :type env: dict
+        :param identity: UID of the user to run as.
+        :type identity: int
+        :param limits: Resource limits
+        :type limits: dict
+        :param requests: Resource requests
+        :type requests: dict
+        :param ports: Container ports to expose.
+        :type ports: list of int
+        :param volumes: List of volume mounts of the form <host_path>:<container_path>
+        :type volumes: list of str
+        :param securityContext: Contains container security context, runAsUser and fsGroup
+        :type securityContext: dict
+        """
+        self.name = name
+        self.image = image
+        self.identity = identity
+        self.limits = Limits(**limits) if isinstance(limits, dict) else limits
+        self.requests = Limits(**requests) if isinstance(requests, dict) else requests
+        logger.debug(f"requests: ${self.requests}\nlimits: ${self.limits}")
+        if isinstance(self.limits, list):
+            self.limits = self.limits[0] # TODO - not sure why this is a list.
+        self.ports = ports
+        self.expose = expose
+        self.depends_on = depends_on
+        self.command = command
+        self.env = \
+            list(map(lambda v : list(map(lambda r: str(r), v.split('='))), env)) \
+            if env else []
+        self.volumes = volumes
+        if liveness_probe != None and liveness_probe.get('httpGet',None) != None:
+            self.liveness_probe = HttpProbe(**liveness_probe)
+        elif liveness_probe != None and liveness_probe.get('tcpSocket',None) != None:
+            self.liveness_probe = TcpProbe(**liveness_probe)
+        elif liveness_probe != None:
+            self.liveness_probe = Probe(**liveness_probe)
+        if readiness_probe != None and readiness_probe.get('httpGet',None) != None:
+            self.readiness_probe = HttpProbe(**readiness_probe)
+        elif readiness_probe != None and readiness_probe.get('tcpSocket',None) != None:
+            self.readiness_probe = TcpProbe(**readiness_probe)
+        elif readiness_probe != None:
+            self.readiness_probe = Probe(**readiness_probe)
+
+    def __repr__(self):
+        return f"name:{self.name} image:{self.image} id:{self.identity} limits:{self.limits}"
+
+
+
+class System:
+    """ Distributed system of interacting containerized software. """
+    def __init__(self, config, name, principal, service_account, conn_string, proxy_rewrite, containers, identifier,
+                 gitea_integration, services={}, security_context={}, init_security_context={}):
+        """ Construct a new abstract model of a system given a name and set of containers.
+
+        Serves as context for the generation of compute cluster specific artifacts.
+
+        :param config: Configuration information.
+        :type config: `Config`
+        :param name: Name of the system.
+        :type name: str
+        :param containers: List of container specifications.
+        :type containers: list of containers
+        """
+        self.config = config
+        self.identifier = identifier
+        self.system_name = name
+        self.amb = False
+        self.irods_enabled = False
+        self.nfrods_uid = ''
+        self.dev_phase = os.getenv('DEV_PHASE', "prod")
+        self.name = f"{name}-{self.identifier}"
+        assert self.name is not None, "System name is required."
+        containers_exist = len(containers) > 0
+        none_are_null = not any([ c for c in containers if c == None ])
+        assert containers_exist and none_are_null, "System container elements may not be null."
+        logger.info(f"=======> Constructing system from containers = {containers}")
+        self.containers = list(map(lambda v : Container(**v), containers)) \
+            if isinstance(containers[0], dict) else \
+            containers
+        """ Construct a map of services. 
""" + self.services = { + service_name : Service(**service_def) + for service_name, service_def in services.items () + } + for name, service in self.services.items (): + service.name = f"{name}-{self.identifier}" + service.name_noid = name + self.volumes = Volumes(self.identifier, containers).process_volumes() + self.source_text = None + self.system_port = None + self.ambassador_id = self._get_ambassador_id() + """ System environment variables """ + self.system_env = dict(principal) + """ System tags """ + self.username = principal.get("username") + username_remove_us = self.username.replace("_", "-") + username_remove_dot = username_remove_us.replace(".", "-") + self.username_all_hyphens = username_remove_dot + self.host = principal.get("host") + self.annotations = {} + self.namespace = "default" + self.serviceaccount = service_account + self.enable_init_container = os.environ.get("TYCHO_APP_ENABLE_INIT_CONTAINER", "true") + self.conn_string = conn_string + """PVC flags and other variables for default volumes""" + self.create_home_dirs = os.environ.get("CREATE_HOME_DIRS", "false").lower() + self.stdnfs_pvc = os.environ.get("STDNFS_PVC", "stdnfs") + self.parent_dir = os.environ.get('PARENT_DIR', 'home') + self.subpath_dir = os.environ.get('SUBPATH_DIR', self.username) + self.shared_dir = os.environ.get('SHARED_DIR', 'shared') + """Default UID and GID for the system""" + default_security_context = self.config.get('tycho')['compute']['system']['defaults']['securityContext'] + self.default_run_as_user = default_security_context.get('uid', '1000') + self.default_run_as_group = default_security_context.get('gid', '1000') + """Override container security context""" + if os.environ.get("NFSRODS_UID"): + self.security_context = { "run_as_user": os.environ.get("NFSRODS_UID")} + else: + self.security_context = security_context + """init security context""" + self.init_security_context = init_security_context + """Resources and limits for the init container""" + self.init_image_repository = os.environ.get("TYCHO_APP_INIT_IMAGE_REPOSITORY", "busybox") + self.init_image_tag = os.environ.get("TYCHO_APP_INIT_IMAGE_TAG", "latest") + self.init_cpus = os.environ.get("TYCHO_APP_INIT_CPUS", "250m") + self.init_memory = os.environ.get("TYCHO_APP_INIT_MEMORY", "250Mi") + self.gpu_resource_name = os.environ.get("TYCHO_APP_GPU_RESOURCE_NAME", "nvidia.com/gpu") + """Proxy rewrite rule for ambassador service annotations""" + self.proxy_rewrite = proxy_rewrite + # """Flag for checking if an IRODS connection is enabled""" + if os.environ.get("IROD_HOST") != None: + logger.info("Irods host enabled") + self.irods_enabled = True + self.nfsrods_host = os.environ.get('NFSRODS_HOST', '') + else: + logger.info("Irods host not enabled") + """gitea settings""" + self.gitea_integration = gitea_integration + self.gitea_host = os.environ.get("GITEA_HOST", " ") + self.gitea_user = os.environ.get("GITEA_USER", " ") + self.gitea_service_name = os.environ.get("GITEA_SERVICE_NAME", " ") + self.ambassador_service_name = os.environ.get("AMBASSADOR_SVC_NAME", "") + + @staticmethod + def set_security_context(sc_from_registry): + security_context: dict[str, Any] = {} + if os.environ.get("NFSRODS_UID"): + security_context["run_as_user"] = os.environ.get("NFSRODS_UID") + else: + security_context["run_as_user"] = sc_from_registry.get("runAsUser") + if os.environ.get("TYCHO_APP_RUN_AS_USER"): + security_context["run_as_user"] = os.environ.get("TYCHO_APP_RUN_AS_USER") + if "runAsUser" in sc_from_registry.keys(): + 
security_context["run_as_user"] = str(sc_from_registry.get("runAsUser")) + else: + security_context["run_as_user"] = os.environ.get("TYCHO_APP_RUN_AS_USER", "0") + if "runAsGroup" in sc_from_registry.keys(): + security_context["run_as_group"] = str(sc_from_registry.get("runAsGroup")) + else: + security_context["run_as_group"] = os.environ.get("TYCHO_APP_RUN_AS_GROUP", "0") + if "fsGroup" in sc_from_registry.keys(): + security_context["fs_group"] = str(sc_from_registry.get("fsGroup")) + else: + security_context["fs_group"] = os.environ.get("TYCHO_APP_FS_GROUP", "0") + return security_context + + @staticmethod + def set_init_security_context(sc_from_registry): + init_security_context = {} + if "initRunAsUser" in sc_from_registry.keys(): + init_security_context["run_as_user"] = str(sc_from_registry.get("initRunAsUser")) + else: + init_security_context["run_as_user"] = os.environ.get("INIT_SC_RUN_AS_USER", "0") + if "initRunAsGroup" in sc_from_registry.keys(): + init_security_context["run_as_group"] = str(sc_from_registry.get("initRunAsGroup")) + else: + init_security_context["run_as_group"] = os.environ.get("INIT_SC_RUN_AS_GROUP", "0") + return init_security_context + + def _get_ambassador_id(self): + return os.environ.get("AMBASSADOR_ID", "") + + @staticmethod + def get_identifier(): + return uuid.uuid4().hex + + def _get_init_resources(self): + resources = self.config.get('tycho')['compute']['system']['defaults']['services']['init']['resources'] + return resources + + def get_namespace(self, namespace="default"): + try: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as secrets: + for line in secrets: + namespace = line + break + except Exception as e: + logger.warning(f"error getting namespace from file: {e}") + return namespace + + def requires_network_policy (self): + return any ([ len(svc.clients) > 0 for name, svc in self.services.items () ]) + + def render (self, template, context={}): + """ Supply this system as a context to a template. + + :param template: Template + """ + final_context = { "system" : self } + for n, v in context.items (): + final_context[n] = v + generator = TemplateUtils (config=self.config) + template = generator.render (template, context=final_context) + logger.debug (f"--generated template: {template}") + return template + + @staticmethod + def env_from_components(spec,env): + env_from_spec = (spec.get('env', []) or spec.get('environment', [])) + env_from_registry = [] + for k in env: + if "STDNFS_PVC" in env[k]: env_from_registry.append(f"{k}={os.environ.get('STDNFS_PVC')}") + else: env_from_registry.append(TemplateUtils.render_string(f"{k}={env[k]}",env)) + return env_from_spec + env_from_registry + + @staticmethod + def parse (config, name, principal, system, service_account, env={}, services={}): + """ Construct a system model based on the input request. + + Parses a docker-compose spec into a system specification. + + :param name: Name of the system. + :param system: Parsed docker-compose specification. + :param env: Dictionary of settings. + :param services: Service specifications - networking configuration. 
+ """ + security_context = System.set_security_context(system.get("security_context", {})) + init_security_context = System.set_init_security_context(system.get("security_context", {})) + principal = json.loads(principal) + identifier = System.get_identifier() + containers = [] + if env != None: + env['identifier'] = identifier + env['username'] = principal.get('username',"Unknown") + system_port = None + for cname,spec in system.get('services',{}).items(): + env['system_name'] = cname + for p in spec.get('ports', []): + if ':' in p: system_port = p.split(':')[1] + else: system_port = p + break + if system_port != None: env['system_port'] = system_port + else: env['system_port'] = 8000 + logger.debug ("applying environment settings.") + system_template = yaml.dump (system) + logger.debug (f"System.parse - system_template:\n{json.dumps(system_template,indent=2)}") + logger.debug (f"System.parse - env:\n{json.dumps(env,indent=2)}") + system_rendered = TemplateUtils.render_text(template_text=system_template,context=env) + logger.debug (f"System.parse - system_rendered:\n {system_rendered}") + for system_render in system_rendered: + system = system_render + + """ Model each service. """ + logger.debug (f"compose {system}") + for cname, spec in system.get('services', {}).items (): + """ Entrypoint may be a string or an array. Deal with either case.""" + ports = [] + expose = [] + entrypoint = spec.get ('entrypoint', '') + """ Adding default volumes to the system containers """ + if spec.get('volumes') == None: + spec.update({'volumes': []}) + rep = { + 'stdnfs_pvc': os.environ.get('STDNFS_PVC', 'stdnfs'), + 'username': principal.get("username"), + 'parent_dir': os.environ.get('PARENT_DIR', 'home'), + 'subpath_dir': os.environ.get('SUBPATH_DIR', principal.get("username")), + 'shared_dir': os.environ.get('SHARED_DIR', 'shared'), + } + if os.environ.get("DEV_PHASE", "prod") != "test": + try: + for volume in config.get('tycho')['compute']['system']['volumes']: + createHomeDirs = os.environ.get('CREATE_HOME_DIRS', "true") + volSplit = volume.split(":") + if createHomeDirs == "false" and ("username" in volume or "shared_dir" in volSplit[1]): + continue + if createHomeDirs == "true" and ("shared_dir" not in volSplit[1] and "subpath_dir" not in volSplit[2]): + continue + for k, v in rep.items(): + volume = volume.replace(k, v) + spec.get('volumes', []).append(volume) + except Exception as e: + logger.info("No volumes specified in the configuration.") + """ Adding entrypoint to container if exists """ + if isinstance(entrypoint, str): + entrypoint = entrypoint.split () + for p in spec.get('ports', []): + if ':' in p: + ports.append({ + 'containerPort': p.split(':')[1] + }) + else: + ports.append({ + 'containerPort': p + }) + for e in spec.get('expose', []): + expose.append({ + 'containerPort': e + }) + """Parsing env variables""" + env_all = System.env_from_components(spec,env) + if spec.get("ext",None) != None and spec.get("ext").get("kube",None) != None: + liveness_probe = spec["ext"]["kube"].get('livenessProbe',None) + readiness_probe = spec["ext"]["kube"].get('readinessProbe',None) + if isinstance(liveness_probe,str) and liveness_probe == "none": liveness_probe = None + if isinstance(readiness_probe,str) and readiness_probe == "none": readiness_probe = None + else: + liveness_probe = None + readiness_probe = None + containers.append({ + "name": cname, + "image": spec['image'], + "command": entrypoint, + "env": env_all, + "limits": spec.get('deploy',{}).get('resources',{}).get('limits',{}), + 
"requests": spec.get('deploy',{}).get('resources',{}).get('reservations',{}), + "ports": ports, + "expose": expose, + "depends_on": spec.get("depends_on", []), + "volumes": [v for v in spec.get("volumes", [])], + "liveness_probe": liveness_probe, + "readiness_probe": readiness_probe + }) + system_specification = { + "config": config, + "name": name, + "principal": principal, + "service_account": service_account, + "conn_string": spec.get("conn_string", ""), + "proxy_rewrite": spec.get("proxy_rewrite", { 'target':None, 'enabled':False }), + "containers": containers, + "identifier": identifier, + "gitea_integration": spec.get("gitea_integration", False), + "services": services, + "security_context": security_context, + "init_security_context": init_security_context + } + if spec.get('proxy_rewrite_rule') != None: + system_specification["proxy_rewrite"]["enabled"] = spec.get('proxy_rewrite_rule') + logger.debug (f"parsed-system: {json.dumps(system_specification, indent=2)}") + system = System(**system_specification) + system.source_text = yaml.dump (system) + return system + + def __repr__(self): + return f"name:{self.name} containers:{self.containers}" + + +class ModifySystem: + """ + This is a class representation of a system's metadata and specs that needs to be modified. + + :param config: A default config for Tycho + :type config: A dict + :param guid: A unique guid to a system/deployment + :type guid: The UUID as a 32-character hexadecimal string + :param labels: A dictionary of labels that are applied to deployments + :type labels: A dictionary + :param resources: A dictionary containing cpu and memory as keys + :type resources: A dictionary + :param containers: A list of containers that are applied to resources + :type containers: A list of Kubernetes V1Container objects, optional + """ + def __init__(self, config, patch, guid, labels, resources): + """ + A constructor method to ModifySystem + """ + self.config = config + self.patch = patch + self.guid = guid + self.labels = labels + self.resources = resources + self.containers = [] + + @staticmethod + def parse_modify(config, guid, labels, cpu, memory): + """ + Returns an instance of :class:`tycho.model.ModifySystem` class + + :returns: An instance of ModifySystem class + :rtype: A class object + """ + + resources = {} + if cpu is not None: + resources.update({"cpu": cpu}) + if memory is not None: + resources.update({"memory": memory}) + + if len(resources) > 0 or len(labels) > 0: + patch = True + else: + patch = False + + modify_system = ModifySystem( + config, + patch, + guid, + labels, + resources, + ) + return modify_system + + def __repr__(self): + return f"name: {self.guid} labels: {self.labels} resources: {self.resources}" + + +class Service: + """ Model network connectivity rules to the system. """ + def __init__(self, port=None, clients=[]): + """ Construct a service object modeling network connectivity to a system. 
""" + self.port = port + self.clients = list(map(lambda v: str(ipaddress.ip_network (v)), clients)) + self.name = None + self.name_noid = None + + def __repr__(self): + return json.dumps ( + f"service: {json.dumps({'port':self.port,'clients':self.clients}, indent=2)}") \ No newline at end of file diff --git a/appstore/tycho/template/cluster/jupyter-ds/clusterrole.yaml b/appstore/tycho/template/cluster/jupyter-ds/clusterrole.yaml new file mode 100644 index 000000000..8360408be --- /dev/null +++ b/appstore/tycho/template/cluster/jupyter-ds/clusterrole.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ system.system_name }} + labels: + name: {{ system.system_name }} +rules: +- apiGroups: [""] + resources: ["pods", "deployments"] + verbs: ["get", "watch", "list"] diff --git a/appstore/tycho/template/cluster/nextflow/clusterrole.yaml b/appstore/tycho/template/cluster/nextflow/clusterrole.yaml new file mode 100644 index 000000000..a63fa54fd --- /dev/null +++ b/appstore/tycho/template/cluster/nextflow/clusterrole.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ system.system_name }} + labels: + name: {{ system.system_name }} + tycho-guid: {{ system.identifier }} +rules: +- apiGroups: [""] + resources: ["pods", "deployments"] + verbs: ["get", "watch", "list"] diff --git a/appstore/tycho/template/cluster/nginx/clusterrole.yaml b/appstore/tycho/template/cluster/nginx/clusterrole.yaml new file mode 100644 index 000000000..8360408be --- /dev/null +++ b/appstore/tycho/template/cluster/nginx/clusterrole.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ system.system_name }} + labels: + name: {{ system.system_name }} +rules: +- apiGroups: [""] + resources: ["pods", "deployments"] + verbs: ["get", "watch", "list"] diff --git a/appstore/tycho/template/clusterrole.yaml b/appstore/tycho/template/clusterrole.yaml new file mode 100644 index 000000000..8360408be --- /dev/null +++ b/appstore/tycho/template/clusterrole.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ system.system_name }} + labels: + name: {{ system.system_name }} +rules: +- apiGroups: [""] + resources: ["pods", "deployments"] + verbs: ["get", "watch", "list"] diff --git a/appstore/tycho/template/clusterrolebinding.yaml b/appstore/tycho/template/clusterrolebinding.yaml new file mode 100644 index 000000000..ededa5c67 --- /dev/null +++ b/appstore/tycho/template/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ system.system_name }}-global-2 + labels: + name: {{ system.system_name }} +#subjects: +#- kind: Group +# name: manager +# apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: {{ system.system_name }} + apiGroup: rbac.authorization.k8s.io diff --git a/appstore/tycho/template/jupyter-ds/clusterrolebinding.yaml b/appstore/tycho/template/jupyter-ds/clusterrolebinding.yaml new file mode 100644 index 000000000..ededa5c67 --- /dev/null +++ b/appstore/tycho/template/jupyter-ds/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ system.system_name }}-global-2 + labels: + name: {{ system.system_name }} +#subjects: +#- kind: Group +# name: manager +# apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: {{ system.system_name }} + apiGroup: 
rbac.authorization.k8s.io diff --git a/appstore/tycho/template/mapping.yaml b/appstore/tycho/template/mapping.yaml new file mode 100644 index 000000000..a3122ad07 --- /dev/null +++ b/appstore/tycho/template/mapping.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: ambassador/v1 +kind: Mapping +name: {{system.name}}-mapping +host: helx-app-commonsshare-org +prefix: {{system.system_name}}/{{system.username}}/{{system.identifier}} +headers: + remote_user: {{system.username}} +service: {{system.name}}:{{system.system_port}} +bypass_auth: true +timeout_ms: 300000 diff --git a/appstore/tycho/template/nextflow/clusterrolebinding.yaml b/appstore/tycho/template/nextflow/clusterrolebinding.yaml new file mode 100644 index 000000000..35f3e4e54 --- /dev/null +++ b/appstore/tycho/template/nextflow/clusterrolebinding.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: view-global + labels: + name: {{ system.system_name }} + tycho-guid: {{ system.identifier }} +subjects: + - kind: ServiceAccount + name: default + namespace: default +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: edit-global + labels: + name: {{ system.system_name }} + tycho-guid: {{ system.identifier }} +subjects: + - kind: ServiceAccount + name: default + namespace: default +roleRef: + kind: ClusterRole + name: edit + apiGroup: rbac.authorization.k8s.io + diff --git a/appstore/tycho/template/nginx/clusterrolebinding.yaml b/appstore/tycho/template/nginx/clusterrolebinding.yaml new file mode 100644 index 000000000..fc5db67ff --- /dev/null +++ b/appstore/tycho/template/nginx/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ system.system_name }}-global + labels: + name: {{ system.system_name }} +#subjects: +#- kind: Group +# name: manager +# apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: {{ system.system_name }} + apiGroup: rbac.authorization.k8s.io diff --git a/appstore/tycho/template/patch.yaml b/appstore/tycho/template/patch.yaml new file mode 100644 index 000000000..95b0cbb78 --- /dev/null +++ b/appstore/tycho/template/patch.yaml @@ -0,0 +1,28 @@ +{% if system_modify.patch %} +spec: + template: + {% if system_modify.labels|length > 0 %} + metadata: + labels: + {% for key, value in system_modify.labels.items() %} + {{ key }}: {{ value }} + {% endfor %} + {% endif %} + {% if system_modify.resources|length > 0 %} + spec: + containers: + {% for container in system_modify.containers %} + - resources: + limits: + {% for key, value in system_modify.resources.items() %} + {{ key }}: {{ value }} + {% endfor %} + requests: + {% for key, value in system_modify.resources.items() %} + {{ key }}: {{ value }} + {% endfor %} + name: {{ container.name }} + image: {{ container.image }} + {% endfor %} + {% endif %} +{% endif %} \ No newline at end of file diff --git a/appstore/tycho/template/pod.yaml b/appstore/tycho/template/pod.yaml new file mode 100644 index 000000000..d54e8f3f9 --- /dev/null +++ b/appstore/tycho/template/pod.yaml @@ -0,0 +1,302 @@ +--- +# +# A Kubernetes Pod manifest template. Projects a +# Tycho system object into a YAML pod definition. +# Tags the object with a system unique +# GUID label to enable later management. 
+# +# Generated by Tycho {{ now() }} +# +apiVersion: v1 +kind: Pod +metadata: + name: {{ system.name }} + labels: + name: {{ system.name }} + username: {{ system.username }} + app-name: {{ system.system_name }} + original-app-name: {{ system.system_name }} + reaper-label: {{ system.system_name }} + executor: tycho + tycho-guid: {{ system.identifier }} + tycho-app-id: {{ system.app_id }} +spec: +{% if system.serviceaccount %} + serviceAccountName: {{system.serviceaccount}} +{% endif %} +{% for container in system.containers %} + {% if container.limits or container.requests %} + {% if container.limits.gpus != None and loop.first %} + tolerations: + - key: "{{ system.gpu_resource_name }}" + operator: "Exists" + effect: "NoSchedule" + {% endif %} + {% endif %} + {% if system.security_context["run_as_user"] or system.security_context.run_as_group or system.security_context.fs_group %} + securityContext: + {% if system.security_context["run_as_user"] %} + runAsUser: {{ system.security_context["run_as_user"] }} + {% endif %} + {% if system.security_context.run_as_group %} + runAsGroup: {{ system.security_context.run_as_group }} + {% endif %} + {% if system.security_context.fs_group %} + fsGroup: {{ system.security_context.fs_group }} + {% endif %} + {% endif %} +{% if (system.enable_init_container == "true") and (system.create_home_dirs == "true") and (system.dev_phase != "test") %} + initContainers: + - name: volume-tasks + image: {{ system.init_image_repository }}:{{ system.init_image_tag }} + {% if system.init_security_context.run_as_user or system.init_security_context.run_as_group %} + securityContext: + {% if system.init_security_context.run_as_user %} + runAsUser: {{ system.init_security_context.run_as_user }} + {% endif %} + {% if system.init_security_context.run_as_group %} + runAsGroup: {{ system.init_security_context.run_as_group }} + {% endif %} + {% endif %} + # Fixed resources + resources: + requests: + memory: {{ system.init_memory }} + cpu: {{ system.init_cpus }} + limits: + memory: {{ system.init_memory }} + cpu: {{ system.init_cpus }} + command: [ 'sh', '-c' ] + args: + - mkdir -p {{ system.parent_dir }}/{{ system.subpath_dir }} && + mkdir -p {{ system.parent_dir }}/{{ system.shared_dir }} && + {% if system.gitea_integration == True %} + mkdir -p {{ system.parent_dir }}/{{ system.subpath_dir }}/.ssh && + echo -e "Host {{ system.gitea_host }}\n Hostname {{ system.gitea_service_name }}\n User {{ system.gitea_user }}\n IdentityFile ~/.ssh/id_gitea" > {{ system.parent_dir }}/{{ system.subpath_dir }}/.ssh/config && + {% endif %} + ls -aln {{ system.parent_dir }} && + echo OK + volumeMounts: + - name: {{ system.stdnfs_pvc }} + mountPath: {{ system.parent_dir }} +{% endif %} +{% endfor %} + containers: +{% for container in system.containers %} + - name: {{ container.name }} + image: {{ container.image }} + {% if system.security_context.run_as_user or system.security_context.run_as_group %} + securityContext: + {% if system.security_context.run_as_user %} + runAsUser: {{ system.security_context.run_as_user }} + {% endif %} + {% if system.security_context.run_as_group %} + runAsGroup: {{ system.security_context.run_as_group }} + {% endif %} + {% endif %} +{% if container.command %} + command: {{ container.command }} +{% endif %} + env: +{% if container.env %} + {%for e in container.env %} + - name : {{ e[0] }} + value : "{{ e[1] }}" + {% endfor %} + - name : GUID + value: {{ system.identifier }} + - name: USER_NAME + value: {{ system.username }} + - name: USER + value: {{ system.username 
}} + {% if system.amb %} + - name: NB_PREFIX + value: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}} + - name: FB_BASEURL + value: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}} + {% else %} + - name: NB_PREFIX + value: / + - name: FB_BASEURL + value: / + {% endif %} +{% endif %} + - name: HOST + value: {{ system.host }} +{% if not container.env %} + - name : GUID + value: {{ system.identifier }} + - name: USER_NAME + value: {{ system.username }} + - name: USER + value: {{ system.username }} + {% if system.amb %} + - name: NB_PREFIX + value: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}} + - name: FB_BASEURL + value: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}} + {% else %} + - name: NB_PREFIX + value: / + - name: FB_BASEURL + value: / + {% endif %} +{% endif %} +{% if system.system_env %} + {% for env in system.system_env %} + - name: {{ env }} + value: {{ system.system_env[env]}} + {% endfor %} +{% endif %} +{% if container.expose|length > 0 %} + ports: +{% for port in container.expose %} + - containerPort: {{ container.expose[loop.index-1]['containerPort'] }} + protocol: TCP +{% endfor %} +{% endif %} # ports +{% if container.limits or container.requests %} + resources: + limits: + {% if container.limits %} + {% if container.limits.cpus != None %} + cpu: "{{ container.limits.cpus }}" + memory: "{{ container.limits.memory }}" + {% endif %} + {% if container.limits.gpus != None %} + {{ system.gpu_resource_name }}: {{ container.limits.gpus }} + {% endif %} + {% if container.limits.ephemeralStorage != None and container.limits.ephemeralStorage != "0" and container.limits.ephemeralStorage != "" %} + ephemeral-storage: {{ container.limits.ephemeralStorage }} + {% endif %} + {% endif %} + requests: + {% if container.requests %} + {% if container.requests.cpus != None %} + cpu: "{{ container.requests.cpus }}" + memory: "{{ container.requests.memory }}" + {% endif %} + {% if container.requests.gpus != None %} + {{ system.gpu_resource_name }}: {{ container.requests.gpus }} + {% endif %} + {% if container.requests.ephemeralStorage != None and container.requests.ephemeralStorage != "0" and container.requests.ephemeralStorage != "" %} + ephemeral-storage: {{ container.requests.ephemeralStorage }} + {% endif %} + {% endif %} +{% endif %} + volumeMounts: + {% if system.irods_enabled == True %} + - name: nfs + mountPath: "/home/nfs" + {% endif %} + {% if system.gitea_integration == True %} + - name: {{ system.username_all_hyphens }}-id-gitea + mountPath: {{ system.parent_dir }}/{{ system.subpath_dir }}/.ssh/id_gitea + subPath: id_gitea + readOnly: true + {% endif %} +{% if container.volumes %} +{% for volume in system.volumes %} + {% if container.name == volume['container_name'] %} + - name: {{ volume["volume_name"] }} + mountPath: {{ volume["path"] }} + subPath: {{ volume["subpath"] }} + readOnly: false + {% endif %} + {% endfor %} + {% endif %} +{% if container.liveness_probe %} + livenessProbe: + {% if container.liveness_probe.cmd %} + exec: + command: + {% for arg in container.liveness_probe.cmd %} + - {{ arg }} + {% endfor %} + {% elif container.liveness_probe.port and container.liveness_probe.path %} + httpGet: + path: {{ container.liveness_probe.path }} + port: {{ container.liveness_probe.port }} + {% if container.liveness_probe.httpHeaders %} + httpHeaders: + {% for name in container.liveness_probe.httpHeaders %} + - name: {{ name }} + value: {{ 
container.liveness_probe.httpHeaders[name] }} + {% endfor %} + {% endif %} + {% elif container.liveness_probe.port %} + tcpSocket: + port: {{ container.liveness_probe.port }} + {% endif %} + {% if container.liveness_probe.delay %} + initialDelaySeconds: {{ container.liveness_probe.delay }} + {% endif %} + {% if container.liveness_probe.period %} + periodSeconds: {{ container.liveness_probe.period }} + {% endif %} + {% if container.liveness_probe.threshold %} + failureThreshold: {{ container.liveness_probe.threshold }} + {% endif %} +{% endif %} +{% if container.readiness_probe %} + readinessProbe: + {% if container.readiness_probe.cmd %} + exec: + command: + {% for arg in container.readiness_probe.cmd %} + - {{ arg }} + {% endfor %} + {% elif container.readiness_probe.port and container.readiness_probe.path %} + httpGet: + path: {{ container.readiness_probe.path }} + port: {{ container.readiness_probe.port }} + {% if container.readiness_probe.httpHeaders %} + httpHeaders: + {% for name in container.readiness_probe.httpHeaders %} + - name: {{ name }} + value: {{ container.readiness_probe.httpHeaders[name] }} + {% endfor %} + {% endif %} + {% elif container.readiness_probe.port %} + tcpSocket: + port: {{ container.readiness_probe.port }} + {% endif %} + {% if container.readiness_probe.delay %} + initialDelaySeconds: {{ container.readiness_probe.delay }} + {% endif %} + {% if container.readiness_probe.period %} + periodSeconds: {{ container.readiness_probe.period }} + {% endif %} + {% if container.readiness_probe.threshold %} + failureThreshold: {{ container.readiness_probe.threshold }} + {% endif %} +{% endif %} +{% endfor %} + volumes: + {% if system.irods_enabled == True %} + - name: nfs + nfs: + server: {{ system.nfsrods_host }} + path: / + {% endif %} + {% if system.gitea_integration == True %} + - name: {{ system.username_all_hyphens }}-id-gitea + secret: + secretName: {{ system.username_all_hyphens }}-id-gitea + defaultMode: 0600 + {% endif %} +# Changes +{% for container in system.containers %} +{% if container.volumes %} +{% for volume in system.volumes %} + {% if container.name == volume['container_name']%} + {% if volume["pvc_name"] != None %} + - name: {{ volume["volume_name"] }} + persistentVolumeClaim: + claimName: {{ volume["pvc_name"] }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} + diff --git a/appstore/tycho/template/policy/tycho-default-netpolicy.yaml b/appstore/tycho/template/policy/tycho-default-netpolicy.yaml new file mode 100644 index 000000000..cae853382 --- /dev/null +++ b/appstore/tycho/template/policy/tycho-default-netpolicy.yaml @@ -0,0 +1,33 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ system.identifier }}-netpolicy + labels: + executor: tycho + tycho-guid: {{ system.identifier }} +spec: + podSelector: + matchLabels: + tycho-guid: {{ system.identifier }} + policyTypes: + - Ingress + - Egress + ingress: + {% if system.services|length > 0 %} + - from: + {% for name, service in system.services.items () %} + {% for ip_block in service.clients %} + - ipBlock: + cidr: {{ ip_block }} + {% endfor %} + {% endfor %} + - podSelector: + matchLabels: + tycho-guid: {{ system.identifier }} + ports: + {% for name, service in system.services.items () %} + - protocol: TCP + port: {{ service.port }} + {% endfor %} + {% endif %} + diff --git a/appstore/tycho/template/pv.yaml b/appstore/tycho/template/pv.yaml new file mode 100644 index 000000000..beb76c0ce --- /dev/null +++ b/appstore/tycho/template/pv.yaml @@ -0,0 +1,27 
@@ +{% for volume in system.volumes %} +{% if volume["requires_nfs"] == "no" %} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ volume["volume_name"] }} + labels: + executor: tycho + username: {{ system.username }} + tycho-guid: {{ system.identifier }} +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + persistentVolumeReclaimPolicy: Recycle +{% if volume["host_path"] %} + hostPath: + path: {{ volume["host_path"] }} +{% else %} + gcePersistentDisk: + pdName: {{ volume["disk_name"] }} + {% endif %} +{% endif %} +{% endfor %} diff --git a/appstore/tycho/template/pvc.yaml b/appstore/tycho/template/pvc.yaml new file mode 100644 index 000000000..0decbd149 --- /dev/null +++ b/appstore/tycho/template/pvc.yaml @@ -0,0 +1,23 @@ +{% for volume in system.volumes %} +{% if volume["requires_nfs"] == "no" %} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + name: {{ system.name }} + username: {{ system.username }} + executor: tycho + tycho-guid: {{ system.identifier }} + name: {{ volume["claim_name"] }} +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + volumeName: {{ volume['volume_name'] }} +status: {} +{% endif %} +{% endfor %} diff --git a/appstore/tycho/template/service.yaml b/appstore/tycho/template/service.yaml new file mode 100644 index 000000000..a41aaaccb --- /dev/null +++ b/appstore/tycho/template/service.yaml @@ -0,0 +1,72 @@ +# Generated by Tycho {{ now() }} +apiVersion: v1 +kind: Service +metadata: + labels: + name: {{ service.name }} + username: {{ system.username }} + executor: tycho + tycho-app: {{ system.name }} + tycho-guid: {{ system.identifier }} + conn_string: {{ system.conn_string }} + ownerReferences: + - apiVersion: apps/v1 + controller: true + kind: Deployment + name: {{ create_deployment_api_response.metadata.name }} + uid: {{ create_deployment_api_response.metadata.uid }} + name: {{ service.name }} + {% if system.amb %} + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: Mapping + name: {{system.name}}-mapping + {% if system.ambassador_id|length > 0 %} + ambassador_id: {{ system.ambassador_id }} + {% endif %} + prefix: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}}/{{system.conn_string}} + service: {{system.name}}:{{system.system_port}} + {% if system.dev_phase != 'dev' %} + headers: + REMOTE_USER: {{system.username}} + {% endif %} + {% if system.proxy_rewrite.enabled == True %} + {% if system.proxy_rewrite.target == None %} + rewrite: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}}/{{system.conn_string}} + {% else %} + rewrite: {{ system.proxy_rewrite.target }} + add_response_headers: + X-Original-Path: /private/{{system.system_name}}/{{system.username}}/{{system.identifier}}/{{system.conn_string}} + {% endif %} + {% endif %} + retry_policy: + retry_on: gateway-error + num_retries: 10 + bypass_auth: true + timeout_ms: 300000 + idle_timeout_ms: 500000 + connect_timeout_ms: 500000 + use_websocket: true + {% endif %} +resourceversion: v1 +spec: + {% if system.amb %} + type: ClusterIP + {% else %} + type: LoadBalancer + {% endif %} + selector: + name: {{ system.name }} + ports: + {% for container in system.containers %} + {% if container.name == service.name_noid %} + {% for port in container.ports %} + - name: port-{{ loop.index }} + port: {{ container.ports[loop.index-1]['containerPort'] }} + protocol: TCP + targetPort: {{ 
container.ports[loop.index-1]['containerPort'] }}
+      {% endfor %}
+      {% endif %}
+    {% endfor %}
diff --git a/appstore/tycho/tycho_utils.py b/appstore/tycho/tycho_utils.py
new file mode 100644
index 000000000..812b23a9e
--- /dev/null
+++ b/appstore/tycho/tycho_utils.py
@@ -0,0 +1,142 @@
+import datetime
+import json
+import logging
+import netifaces
+import os
+import string
+import traceback
+import yaml
+from jinja2 import Template
+
+logger = logging.getLogger (__name__)
+
+class TemplateUtils:
+    """ Utilities for generating text. """
+
+    def __init__(self, config):
+        self.config = config
+
+    def render (self, template, context):
+        """ Render a template object given a context. """
+        result = None
+        template_path = None
+
+        """ First, allow the user to override the default templates with custom templates.
+        Check for a template with this name in the user provided paths. """
+        alternate_paths = self.config['tycho']['templates']['paths']
+        for path in alternate_paths:
+            if os.path.exists (path):
+                template_path = os.path.join (path, template)
+                if os.path.exists (template_path):
+                    logger.debug (f"using user supplied template: {template_path}")
+                    break
+                else:
+                    template_path = None
+            else:
+                logger.warning (f"template path {path} is configured but does not exist.")
+
+        if not template_path:
+            """ Still no template. Look for it in the default design. """
+            template_path = os.path.join (os.path.dirname (__file__), "template", template)
+            if not os.path.exists (template_path):
+                template_path = None
+
+        if not template_path:
+            raise ValueError (
+                f"No template {template} found in default location or in {alternate_paths}")
+
+        logger.debug (f"applying template {template_path}")
+        with open(template_path, "r") as stream:
+            template_text = stream.read ()
+        result = TemplateUtils.render_text (template_text, context)
+        return result
+
+    @staticmethod
+    def render_text (template_text, context):
+        """ Render the text of a template given a context. """
+        template = Template (template_text)
+        template.globals['now'] = datetime.datetime.utcnow
+        text = template.render (**context)
+        logger.debug (f"TemplateUtils.render_text - {text}")
+        return yaml.load_all (text, Loader=yaml.SafeLoader)
+
+    @staticmethod
+    def render_string(s, context):
+        tmpl = Template(s)
+        tmpl.globals['now'] = datetime.datetime.utcnow
+        return tmpl.render(context)
+
+    @staticmethod
+    def apply_environment (environment, text):
+        """ Given an environment configuration consisting of lines of Bash style variable assignments,
+        parse the variables and apply them to the given text. """
+        resolved = text
+        if environment:
+            mapping = {
+                line.split("=", maxsplit=1)[0] : line.split("=", maxsplit=1)[1]
+                for line in environment.split ("\n") if '=' in line
+            }
+            resolved = string.Template(text).safe_substitute (**mapping)
+            logger.debug (f"environment={json.dumps (mapping, indent=2)}")
+            logger.debug (resolved)
+        return resolved
+
+    @staticmethod
+    def trunc (a_string, max_len=80):
+        return (a_string[:max_len] + '..') if len(a_string) > max_len else a_string
+
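A minimal sketch of how these utilities drive the templates above, with a hypothetical minimal config and a system object as produced by System.parse:

    config = {"tycho": {"templates": {"paths": []}}}   # no user template overrides
    utils = TemplateUtils(config=config)
    docs = utils.render("pod.yaml", context={"system": system})
    for doc in docs:          # render_text returns a yaml.load_all iterator
        print(doc["kind"])    # e.g. 'Pod'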
+class NetworkUtils:
+    @staticmethod
+    def get_client_ip (request, debug=False):
+        """ Get the IP address of the client. Account for requests from proxies.
+        In debug mode, ignore loopback and try to get an IP from an interface. """
+        ip_addr = request.remote_addr
+        if request.headers.getlist("X-Forwarded-For"):
+            ip_addr = request.headers.getlist("X-Forwarded-For")[0]
+        if debug:
+            interface = netifaces.ifaddresses ('en0')
+            ip_addr = interface[2][0]['addr']
+            logger.debug (f"(debug mode ip addr:)--> {ip_addr}")
+        return ip_addr
+
+class Resource:
+    @staticmethod
+    def get_resource_path(resource_name):
+        # Given a string, resolve it to a module-relative file path unless it is already an absolute path.
+        resource_path = resource_name
+        if not resource_path.startswith (os.sep):
+            resource_path = os.path.join (os.path.dirname (__file__), resource_path)
+        return resource_path
+
+    @staticmethod
+    def load_json (path):
+        result = None
+        with open (path, 'r') as stream:
+            result = json.loads (stream.read ())
+        return result
+
+    @staticmethod
+    def load_yaml (path):
+        result = None
+        with open (path, 'r') as stream:
+            result = yaml.safe_load (stream.read ())
+        return result
+
+    def get_resource_obj (self, resource_name, format=None):
+        # TODO: Fix bug where format could differ from the resource's file extension.
+        result = None
+        if not format:
+            if resource_name.endswith ('.yaml'):
+                format = 'yaml'
+            else:
+                format = 'json'
+        path = Resource.get_resource_path (resource_name)
+        if os.path.exists (path):
+            m = {
+                'json' : Resource.load_json,
+                'yaml' : Resource.load_yaml
+            }
+            if format in m:
+                result = m[format](path)
+        return result
diff --git a/bin/authorizeuser.py b/bin/authorizeuser.py
index 203e79b78..5e26fae6e 100644
--- a/bin/authorizeuser.py
+++ b/bin/authorizeuser.py
@@ -43,12 +43,14 @@ def irods_user_create(username,uid):
 if AUTH_USERS:
     users = re.split(r'[\s,]+',AUTH_USERS)
     for user in users:
-        if AuthorizedUser.objects.filter(email=user):
-            print(f'{"User already in Authorized Users list ----> add skipping"}')
+        is_email = re.match(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',user) is not None
+        is_authorized_user = AuthorizedUser.objects.filter(email=user) if is_email else AuthorizedUser.objects.filter(username=user)
+        if is_authorized_user:
+            print(f"User {user} already in Authorized Users list ----> add skipping")
         else:
-            u = AuthorizedUser(email=user)
+            u = AuthorizedUser(email=user) if is_email else AuthorizedUser(username=user)
             u.save()
-            print(f"Added {user} to the Authorized Users list ----> add success")
+            print(f"Added {user} to the Authorized Users list ----> add success")
 
 if IROD_AUTH_USERS:
     print("IRODS USERS ENABLED")
@@ -56,14 +58,12 @@
         for line in f:
             user,uid = line.split("::")
             irods_user_create(user,uid)
-
-
-
 if REMOVE_AUTH_USERS:
     users = re.split(r'[\s,]+',REMOVE_AUTH_USERS)
     for user in users:
-        a_user = AuthorizedUser.objects.filter(email=user)
+        is_email = re.match(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',user) is not None
+        a_user = AuthorizedUser.objects.filter(email=user) if is_email else AuthorizedUser.objects.filter(username=user)
         if a_user:
             a_user.delete()
             print(f"Removed {user} from Authorized Users list ----> remove success")
diff --git a/bin/authorizeuser.sh b/bin/authorizeuser.sh
index 7870ba0b3..dd37166d3 100755
--- a/bin/authorizeuser.sh
+++ b/bin/authorizeuser.sh
@@ -21,12 +21,14 @@ manageauthorizedusers () {
   for user in "${USERS[@]}"; do
     cat <<EOF | python manage.py shell
-if AuthorizedUser.objects.filter(email="$user"):
-    print(f'{"User already in Authorized Users list ----> add skipping"}')
+is_email = re.match(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',"$user") is not None
+is_authorized_user = AuthorizedUser.objects.filter(email="$user") if is_email else AuthorizedUser.objects.filter(username="$user")
+if is_authorized_user:
+    print(f"User {'$user'} already in Authorized Users list ----> add skipping")
 else:
-    u = AuthorizedUser(email="$user")
+    u = AuthorizedUser(email="$user") if is_email else AuthorizedUser(username="$user")
     u.save()
-    print(f"Added {'$user'} to the Authorized Users list ----> add success")
+    print(f"Added {'$user'} to the Authorized Users list ----> add success")
EOF
  done
fi
@@ -38,12 +40,13 @@ EOF
  for user in "${USERS[@]}"; do
    cat <<EOF | python manage.py shell
-a_user = AuthorizedUser.objects.filter(email="$user")
+is_email = re.match(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',"$user") is not None
+a_user = AuthorizedUser.objects.filter(email="$user") if is_email else AuthorizedUser.objects.filter(username="$user")
 if a_user:
     a_user.delete()
-    print(f"Removed {'$user'} from Authorized Users list ----> remove success")
+    print(f"Removed {'$user'} from Authorized Users list ----> remove success")
 else:
-    print(f"{'$user'} not in Authorized Users list ----> remove skipping")
+    print(f"{'$user'} not in Authorized Users list ----> remove skipping")
EOF
  done
fi
diff --git a/bin/createtestusers.py b/bin/createtestusers.py
index e4442a3a0..e48d3d0ed 100644
--- a/bin/createtestusers.py
+++ b/bin/createtestusers.py
@@ -17,8 +17,10 @@
         django_user.save()
     if AuthorizedUser.objects.filter(email=email):
         print(f'{"User already in Authorized Users list ----> add skipping"}')
+    elif AuthorizedUser.objects.filter(username=username):
+        print(f'{"User already in Authorized Users list ----> add skipping"}')
     else:
-        u = AuthorizedUser(email=email)
+        u = AuthorizedUser(email=email, username=username)
         u.save()
         print(f"Added {username} to the Authorized Users list ----> add success")
 else:
diff --git a/requirements.txt b/requirements.txt
index 80ae4663b..aba937ced 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,35 @@
-Django==3.2
-django-allauth==0.54.0
-django-cors-headers==3.7.0
-django-crispy-forms==1.11.2
-django-debug-toolbar==3.2
-django-extensions==3.1.1
-django-saml2-auth==2.2.1
-djangorestframework==3.12.2
-drf-spectacular==0.15.1
+Django==4.2
+django-allauth==0.61.1
+django-cors-headers==4.3.1
+django-crispy-forms==2.1
+django-debug-toolbar==4.3.0
+django-extensions==3.2.3
+grafana-django-saml2-auth==3.12.0
+djangorestframework==3.14.0
+drf-spectacular==0.27.1
 flake8==3.9.0
 gunicorn==20.1.0
 mock==4.0.2
-pysaml2
-python3-openid==3.1.0
+pysaml2==7.4.2
+python3-openid==3.2.0
 requests==2.31.0
-requests-oauthlib
+requests-oauthlib==1.4.0
 selenium==3.141.0
-tycho-api>=1.17
 webdriver-manager==3.2.1
-sqlparse==0.4.2
-asgiref==3.4.1
+sqlparse==0.4.4
+asgiref==3.7.2
 psycopg2-binary
-python-irodsclient==1.1.5
\ No newline at end of file
+python-irodsclient==1.1.5
+deepmerge==1.0.1
+flasgger==0.9.5
+Flask==2.0.3
+flask_cors==3.0.10
+flask_restful==0.3.9
+GitPython==3.1.11
+Jinja2==3.0.3
+jsonschema==3.2.0
+kubernetes==25.3.0
+netifaces==0.11.0
+PyYAML==5.4.1
+docker-compose==1.29.2
+requests_cache==0.9.2
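The email-versus-username dispatch added in bin/authorizeuser.py and bin/authorizeuser.sh above hinges on a single regex; for illustration (the addresses are hypothetical):

    import re
    EMAIL = r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
    re.match(EMAIL, "alice@renci.org") is not None   # True  -> look up by email
    re.match(EMAIL, "alice") is not None             # False -> look up by username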