From 1aa0f78a606e77c6d82406ddf40184292a86c41e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 23 Jan 2025 17:43:37 +0000 Subject: [PATCH] deploy: 081290b777c2f466e5cb7a88c7de6401672d637b --- 404.html | 4 ++-- assets/js/{410931d6.aaee0d12.js => 410931d6.52466794.js} | 2 +- .../js/{runtime~main.ea2549ce.js => runtime~main.22af30d0.js} | 2 +- blog/archive/index.html | 4 ++-- blog/first-blog-post/index.html | 4 ++-- blog/index.html | 4 ++-- blog/long-blog-post/index.html | 4 ++-- blog/mdx-blog-post/index.html | 4 ++-- blog/tags/docusaurus/index.html | 4 ++-- blog/tags/facebook/index.html | 4 ++-- blog/tags/hello/index.html | 4 ++-- blog/tags/hola/index.html | 4 ++-- blog/tags/index.html | 4 ++-- blog/welcome/index.html | 4 ++-- docs/about/CODE_OF_CONDUCT/index.html | 4 ++-- docs/about/GOVERNANCE/index.html | 4 ++-- docs/about/index.html | 4 ++-- docs/category/-governance/index.html | 4 ++-- docs/category/-information-sharing/index.html | 4 ++-- docs/category/-software-lifecycle/index.html | 4 ++-- docs/category/application-starter-kits/index.html | 4 ++-- docs/category/contributions/index.html | 4 ++-- docs/category/security/index.html | 4 ++-- docs/contribute/contributing/development-process/index.html | 4 ++-- docs/contribute/contributing/index.html | 4 ++-- docs/contribute/contributing/introduction/index.html | 4 ++-- .../contribute/contributing/know-before-contribute/index.html | 4 ++-- docs/contribute/contributing/ways-to-contribute/index.html | 4 ++-- docs/contribute/submit-best-practice/index.html | 4 ++-- docs/guides/checklist/index.html | 4 ++-- docs/guides/documentation/change-log/index.html | 4 ++-- docs/guides/documentation/documentation-hosts/index.html | 4 ++-- .../trade-study-hostingdocs-user/index.html | 4 ++-- .../documentation/documentation-hosts/use-cases/index.html | 4 ++-- docs/guides/documentation/readme/index.html | 4 ++-- 
.../governance/contributions/code-of-conduct/index.html | 4 ++-- .../governance/contributions/contributing-guide/index.html | 4 ++-- .../governance/contributions/issue-templates/index.html | 4 ++-- docs/guides/governance/contributions/pull-requests/index.html | 4 ++-- docs/guides/governance/governance-model/index.html | 4 ++-- docs/guides/search/index.html | 4 ++-- .../application-starter-kits/python-starter-kit/index.html | 4 ++-- docs/guides/software-lifecycle/continuous-delivery/index.html | 4 ++-- .../continuous-integration-frameworks/index.html | 4 ++-- .../software-lifecycle/continuous-integration/index.html | 4 ++-- .../continuous-integration/reference-architecture/index.html | 4 ++-- .../continuous-testing/TESTING-example/index.html | 4 ++-- docs/guides/software-lifecycle/continuous-testing/index.html | 4 ++-- .../continuous-testing/testing-frameworks/index.html | 4 ++-- docs/guides/software-lifecycle/metrics/index.html | 4 ++-- .../security/container-vulnerability-scanning/index.html | 4 ++-- .../software-lifecycle/security/github-security/index.html | 4 ++-- .../software-lifecycle/security/secrets-detection/index.html | 4 ++-- docs/join/index.html | 4 ++-- index.html | 4 ++-- markdown-page/index.html | 4 ++-- 56 files changed, 110 insertions(+), 110 deletions(-) rename assets/js/{410931d6.aaee0d12.js => 410931d6.52466794.js} (72%) rename assets/js/{runtime~main.ea2549ce.js => runtime~main.22af30d0.js} (99%) diff --git a/404.html b/404.html index fc673654..54774486 100644 --- a/404.html +++ b/404.html @@ -11,13 +11,13 @@ - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/410931d6.aaee0d12.js b/assets/js/410931d6.52466794.js similarity index 72% rename from assets/js/410931d6.aaee0d12.js rename to assets/js/410931d6.52466794.js index 2097b83f..991f831b 100644 --- a/assets/js/410931d6.aaee0d12.js +++ b/assets/js/410931d6.52466794.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocusaurus=self.webpackChunkdocusaurus||[]).push([[6025],{3905:(e,t,n)=>{n.d(t,{Zo:()=>u,kt:()=>g});var a=n(7294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function o(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var s=a.createContext({}),p=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},u=function(e){var t=p(e.components);return a.createElement(s.Provider,{value:t},e.children)},c="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,r=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=p(n),d=i,g=c["".concat(s,".").concat(d)]||c[d]||m[d]||r;return n?a.createElement(g,o(o({ref:t},u),{},{components:n})):a.createElement(g,o({ref:t},u))}));function g(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=n.length,o=new Array(r);o[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[c]="string"==typeof e?e:i,o[1]=l;for(var 
p=2;p{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>m,frontMatter:()=>r,metadata:()=>l,toc:()=>p});var a=n(7462),i=(n(7294),n(3905));const r={title:"Continuous Delivery",description:"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices."},o="Continuous Delivery",l={unversionedId:"guides/software-lifecycle/continuous-delivery/readme",id:"guides/software-lifecycle/continuous-delivery/readme",title:"Continuous Delivery",description:"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices.",source:"@site/docs/guides/software-lifecycle/continuous-delivery/readme.md",sourceDirName:"guides/software-lifecycle/continuous-delivery",slug:"/guides/software-lifecycle/continuous-delivery/",permalink:"/slim/docs/guides/software-lifecycle/continuous-delivery/",draft:!1,editUrl:"https://github.com/nasa-ammos/slim/tree/main/docs/guides/software-lifecycle/continuous-delivery/readme.md",tags:[],version:"current",frontMatter:{title:"Continuous Delivery",description:"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices."},sidebar:"guidesSidebar",previous:{title:"Secrets Detection",permalink:"/slim/docs/guides/software-lifecycle/security/secrets-detection/"},next:{title:"Continuous Integration",permalink:"/slim/docs/guides/software-lifecycle/continuous-integration/"}},s={},p=[{value:"Introduction",id:"introduction",level:2},{value:"Background",id:"background",level:3},{value:"Approach",id:"approach",level:3},{value:"Key Use Cases",id:"key-use-cases",level:3},{value:"Quick Start",id:"quick-start",level:2},{value:"Step-by-Step Guide",id:"step-by-step-guide",level:2},{value:"1. 
Select Package Repositories",id:"1-select-package-repositories",level:3},{value:"1.1 Code Packages",id:"11-code-packages",level:4},{value:"Python Packages",id:"python-packages",level:5},{value:"Java Packages",id:"java-packages",level:5},{value:"NodeJS Packages",id:"nodejs-packages",level:5},{value:"1.2 Container Images",id:"12-container-images",level:4},{value:"Public Containers",id:"public-containers",level:5},{value:"Private Containers",id:"private-containers",level:5},{value:"1.3 Test Data",id:"13-test-data",level:4},{value:"Small Datasets (<2GB)",id:"small-datasets-2gb",level:5},{value:"Medium Datasets (2GB-100GB)",id:"medium-datasets-2gb-100gb",level:5},{value:"Large Datasets (>100GB)",id:"large-datasets-100gb",level:5},{value:"2. Implement Naming Conventions",id:"2-implement-naming-conventions",level:3},{value:"2.1 Package Naming",id:"21-package-naming",level:4},{value:"2.2 Version Naming",id:"22-version-naming",level:4},{value:"3. Automate Publishing",id:"3-automate-publishing",level:3},{value:"3.1 GitHub Actions Workflow",id:"31-github-actions-workflow",level:4},{value:"3.2 Automated Testing Integration",id:"32-automated-testing-integration",level:4},{value:"4. Maintain Delivery Pipeline",id:"4-maintain-delivery-pipeline",level:3},{value:"5. 
GitHub Actions Workflow Example for PyPI Project Continuous Delivery",id:"5-github-actions-workflow-example-for-pypi-project-continuous-delivery",level:3},{value:"Frequently Asked Questions (FAQ)",id:"frequently-asked-questions-faq",level:2},{value:"Credits",id:"credits",level:2},{value:"Feedback and Contributions",id:"feedback-and-contributions",level:2}],u={toc:p},c="wrapper";function m(e){let{components:t,...r}=e;return(0,i.kt)(c,(0,a.Z)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("h1",{id:"continuous-delivery"},"Continuous Delivery"),(0,i.kt)("pre",{align:"center"},"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices."),(0,i.kt)("h2",{id:"introduction"},"Introduction"),(0,i.kt)("h3",{id:"background"},"Background"),(0,i.kt)("p",null,"Continuous Delivery (CD) is the practice of automatically preparing code changes for production release, extending Continuous Integration (CI) to ensure that every validated change is always production-ready. This guide presents a simplified, practical approach to implementing CD through standardized repository selections, naming conventions, and automation."),(0,i.kt)("h3",{id:"approach"},"Approach"),(0,i.kt)("p",null,"Adopt a clear, four-step plan to implement Continuous Delivery effectively:"),(0,i.kt)("ol",null,(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Choose repositories"),"."),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Adopt standardized naming conventions"),"."),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Automate publishing"),"."),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Maintain the delivery pipeline"),".")),(0,i.kt)("mermaid",{value:"graph TD\n Repository[Choose Repositories] --\x3e Naming[Adopt Naming
Conventions] --\x3e Automate[Automate Publishing]\n Automate --\x3e Maintain[Maintain Pipeline]\n Maintain --\x3e Repository"}),(0,i.kt)("h3",{id:"key-use-cases"},"Key Use Cases"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"Auto-publishing built artifacts to package managers."),(0,i.kt)("li",{parentName:"ul"},"Standardizing naming conventions across repositories."),(0,i.kt)("li",{parentName:"ul"},"Versioning releases using semantic versioning."),(0,i.kt)("li",{parentName:"ul"},"Distributing test data automatically."),(0,i.kt)("li",{parentName:"ul"},"Automating container image publication."),(0,i.kt)("li",{parentName:"ul"},"Enabling infrastructure-as-code deployment.")),(0,i.kt)("h2",{id:"quick-start"},"Quick Start"),(0,i.kt)("p",null,"The most important step in setting up continuous delivery is choosing the right repositories and implementing proper naming conventions."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Key Concepts to Get Started:")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},(0,i.kt)("a",{parentName:"strong",href:"#package-repositories"},"\u2b07\ufe0f Choose a Package Repository"))," based on your artifact type:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"PyPI for Python packages"),(0,i.kt)("li",{parentName:"ul"},"Maven Central for Java"),(0,i.kt)("li",{parentName:"ul"},"NPM Registry for NodeJS"),(0,i.kt)("li",{parentName:"ul"},"ECR (Amazon Elastic Container Registry)/DockerHub for Containers")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"\ud83d\udcdd Implement ",(0,i.kt)("a",{parentName:"strong",href:"#naming-conventions"},"Standardized Naming Conventions"),":")),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"nasa-[project-org]-[module-name]")," for Python"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"gov.nasa.[project-org].[module-name]")," for 
Java"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"@nasa-[project-org]/[module-name]")," for NodeJS")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"\ud83d\ude80 Set up ",(0,i.kt)("a",{parentName:"strong",href:"#automated-publishing"},"Automated Publishing"))," using GitHub Actions"),(0,i.kt)("h2",{id:"step-by-step-guide"},"Step-by-Step Guide"),(0,i.kt)("h3",{id:"1-select-package-repositories"},"1. Select Package Repositories"),(0,i.kt)("p",null,"Choose appropriate repositories based on your artifact type:"),(0,i.kt)("h4",{id:"11-code-packages"},"1.1 Code Packages"),(0,i.kt)("h5",{id:"python-packages"},"Python Packages"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": PyPI"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Size Limit"),": 60MB"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Cost"),": Free"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Python libraries and tools"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Setup Steps"),":",(0,i.kt)("ol",{parentName:"li"},(0,i.kt)("li",{parentName:"ol"},"Create account on PyPI"),(0,i.kt)("li",{parentName:"ol"},"Set up project with ",(0,i.kt)("inlineCode",{parentName:"li"},"setup.py")," or ",(0,i.kt)("inlineCode",{parentName:"li"},"pyproject.toml")),(0,i.kt)("li",{parentName:"ol"},"Configure automated publishing")))),(0,i.kt)("h5",{id:"java-packages"},"Java Packages"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": Maven Central"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Size Limit"),": No specific limit"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Cost"),": Free"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Java libraries and 
frameworks"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Setup Steps"),":",(0,i.kt)("ol",{parentName:"li"},(0,i.kt)("li",{parentName:"ol"},"Create Sonatype account"),(0,i.kt)("li",{parentName:"ol"},"Configure Maven settings"),(0,i.kt)("li",{parentName:"ol"},"Set up GPG signing")))),(0,i.kt)("h5",{id:"nodejs-packages"},"NodeJS Packages"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": NPM Registry"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Size Limit"),": No specific limit"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Cost"),": Free"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": JavaScript/TypeScript packages"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Setup Steps"),":",(0,i.kt)("ol",{parentName:"li"},(0,i.kt)("li",{parentName:"ol"},"Create NPM account"),(0,i.kt)("li",{parentName:"ol"},"Configure package.json"),(0,i.kt)("li",{parentName:"ol"},"Set up automated publishing")))),(0,i.kt)("h4",{id:"12-container-images"},"1.2 Container Images"),(0,i.kt)("h5",{id:"public-containers"},"Public Containers"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": GitHub Packages/GitLab Registry"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Open source projects"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Limitations"),": Higher latency for runtime")),(0,i.kt)("h5",{id:"private-containers"},"Private Containers"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": Amazon ECR"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Production deployments"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Features"),": Low-latency pulls, private 
repos")),(0,i.kt)("h4",{id:"13-test-data"},"1.3 Test Data"),(0,i.kt)("h5",{id:"small-datasets-2gb"},"Small Datasets (<2GB)"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": GitHub/GitLab Releases"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Naming"),": ",(0,i.kt)("inlineCode",{parentName:"li"},"[project-org]-[project-module]-test-dataset")),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Unit test data, small samples")),(0,i.kt)("h5",{id:"medium-datasets-2gb-100gb"},"Medium Datasets (2GB-100GB)"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": Amazon S3"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Features"),": Pre-signed URLs, bandwidth control"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Integration test data")),(0,i.kt)("h5",{id:"large-datasets-100gb"},"Large Datasets (>100GB)"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": EOSDIS DAAC (Earth data) or PDS (Planetary data)"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Mission data, large-scale testing")),(0,i.kt)("h3",{id:"2-implement-naming-conventions"},"2. 
Implement Naming Conventions"),(0,i.kt)("h4",{id:"21-package-naming"},"2.1 Package Naming"),(0,i.kt)("p",null,"Follow standard naming conventions for each repository type:"),(0,i.kt)("mermaid",{value:"graph TD\n A[Package Type] --\x3e B{Language?}\n B --\x3e|Python| C[nasa-project-module]\n B --\x3e|Java| D[gov.nasa.project.module]\n B --\x3e|NodeJS| E[@nasa-project/module]"}),(0,i.kt)("h4",{id:"22-version-naming"},"2.2 Version Naming"),(0,i.kt)("p",null,"Use semantic versioning (MAJOR.MINOR.PATCH):"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"MAJOR: Breaking changes"),(0,i.kt)("li",{parentName:"ul"},"MINOR: New features, backward compatible"),(0,i.kt)("li",{parentName:"ul"},"PATCH: Bug fixes")),(0,i.kt)("h3",{id:"3-automate-publishing"},"3. Automate Publishing"),(0,i.kt)("h4",{id:"31-github-actions-workflow"},"3.1 GitHub Actions Workflow"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},"name: Publish Package\n\non:\n release:\n types: [published]\n\njobs:\n publish:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v3\n - name: Set up environment\n uses: actions/setup-python@v3\n with:\n python-version: '3.x'\n - name: Build and publish\n env:\n TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}\n TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}\n run: |\n python -m pip install build twine\n python -m build\n python -m twine upload dist/*\n")),(0,i.kt)("h4",{id:"32-automated-testing-integration"},"3.2 Automated Testing Integration"),(0,i.kt)("mermaid",{value:"graph TD\n A[Code Change] --\x3e B[Run Tests]\n B --\x3e C{Tests Pass?}\n C --\x3e|Yes| D[Build Artifact]\n C --\x3e|No| E[Fix Issues]\n D --\x3e F[Publish to Repository]"}),(0,i.kt)("h3",{id:"4-maintain-delivery-pipeline"},"4. 
Maintain Delivery Pipeline"),(0,i.kt)("p",null,"Regular maintenance tasks:"),(0,i.kt)("ol",null,(0,i.kt)("li",{parentName:"ol"},"Update repository credentials"),(0,i.kt)("li",{parentName:"ol"},"Monitor publishing success rates"),(0,i.kt)("li",{parentName:"ol"},"Verify artifact integrity"),(0,i.kt)("li",{parentName:"ol"},"Review and update workflows"),(0,i.kt)("li",{parentName:"ol"},"Clean up old artifacts")),(0,i.kt)("h3",{id:"5-github-actions-workflow-example-for-pypi-project-continuous-delivery"},"5. GitHub Actions Workflow Example for PyPI Project Continuous Delivery"),(0,i.kt)("p",null,"Create a ",(0,i.kt)("inlineCode",{parentName:"p"},".github/workflows/pypi-cd.yml")," file in your GitHub repository with the following ",(0,i.kt)("a",{target:"_blank",href:n(7712).Z},"content"),":"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},"name: Continuous Delivery for PyPI Project\n\non:\n push:\n branches:\n - main # Trigger on push to the 'main' branch\n tags:\n - 'v*.*.*' # Trigger on tags matching semantic versioning (v1.0.0)\n\njobs:\n # Job to set up the environment, install dependencies, and publish to PyPI\n publish-to-pypi:\n runs-on: ubuntu-latest\n\n steps:\n - name: Checkout repository\n uses: actions/checkout@v3\n\n - name: Set up Python\n uses: actions/setup-python@v3\n with:\n python-version: '3.x' # Use a specific Python version, e.g., '3.8', '3.9', etc.\n\n - name: Install dependencies\n run: |\n python -m pip install --upgrade pip\n pip install build twine # Required for building and publishing to PyPI\n\n - name: Build the package\n run: |\n python -m build # This creates the distribution files under the 'dist' directory\n\n - name: Publish package to PyPI\n env:\n TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} # Store PyPI credentials as GitHub secrets\n TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}\n run: |\n python -m twine upload dist/* # Uploads the package to 
PyPI\n")),(0,i.kt)("h2",{id:"frequently-asked-questions-faq"},"Frequently Asked Questions (FAQ)"),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: How do I handle dependencies between packages?")),(0,i.kt)("p",null,"A: Use semantic versioning and dependency ranges to manage package relationships."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: What about handling sensitive data in artifacts?")),(0,i.kt)("p",null,"A: Use private repositories and encrypted secrets in CI/CD pipelines."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: How often should artifacts be published?")),(0,i.kt)("p",null,"A: Publish on every tagged release for stable versions, and optionally for development versions."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: How to manage large binary artifacts?")),(0,i.kt)("p",null,"A: Use specialized repositories like Amazon S3 for large artifacts and reference them in package metadata."),(0,i.kt)("h2",{id:"credits"},"Credits"),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Authorship"),":"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"https://github.com/yunks128"},"Kyongsik Yun"))),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Acknowledgements"),":"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"Thanks to the SLIM team for providing guidance")),(0,i.kt)("h2",{id:"feedback-and-contributions"},"Feedback and Contributions"),(0,i.kt)("p",null,"We welcome feedback and contributions to help improve and grow this page. 
Please see our ",(0,i.kt)("a",{parentName:"p",href:"https://nasa-ammos.github.io/slim/docs/contribute/contributing/"},"contribution guidelines"),"."))}m.isMDXComponent=!0},7712:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/files/pypi-cd-template-cac7a7a0d3f5806ee6451c68d396f70d.yml"}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocusaurus=self.webpackChunkdocusaurus||[]).push([[6025],{3905:(e,t,n)=>{n.d(t,{Zo:()=>u,kt:()=>g});var a=n(7294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function o(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var s=a.createContext({}),p=function(e){var t=a.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},u=function(e){var t=p(e.components);return a.createElement(s.Provider,{value:t},e.children)},c="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,r=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=p(n),d=i,g=c["".concat(s,".").concat(d)]||c[d]||m[d]||r;return n?a.createElement(g,o(o({ref:t},u),{},{components:n})):a.createElement(g,o({ref:t},u))}));function g(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=n.length,o=new Array(r);o[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[c]="string"==typeof e?e:i,o[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>m,frontMatter:()=>r,metadata:()=>l,toc:()=>p});var 
a=n(7462),i=(n(7294),n(3905));const r={title:"Continuous Delivery",description:"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices."},o="Continuous Delivery",l={unversionedId:"guides/software-lifecycle/continuous-delivery/readme",id:"guides/software-lifecycle/continuous-delivery/readme",title:"Continuous Delivery",description:"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices.",source:"@site/docs/guides/software-lifecycle/continuous-delivery/readme.md",sourceDirName:"guides/software-lifecycle/continuous-delivery",slug:"/guides/software-lifecycle/continuous-delivery/",permalink:"/slim/docs/guides/software-lifecycle/continuous-delivery/",draft:!1,editUrl:"https://github.com/nasa-ammos/slim/tree/main/docs/guides/software-lifecycle/continuous-delivery/readme.md",tags:[],version:"current",frontMatter:{title:"Continuous Delivery",description:"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices."},sidebar:"guidesSidebar",previous:{title:"Secrets Detection",permalink:"/slim/docs/guides/software-lifecycle/security/secrets-detection/"},next:{title:"Continuous Integration",permalink:"/slim/docs/guides/software-lifecycle/continuous-integration/"}},s={},p=[{value:"Introduction",id:"introduction",level:2},{value:"Background",id:"background",level:3},{value:"Approach",id:"approach",level:3},{value:"Key Use Cases",id:"key-use-cases",level:3},{value:"Quick Start",id:"quick-start",level:2},{value:"Step-by-Step Guide",id:"step-by-step-guide",level:2},{value:"1. 
Select Package Repositories",id:"1-select-package-repositories",level:3},{value:"1.1 Code Packages",id:"11-code-packages",level:4},{value:"Python Packages",id:"python-packages",level:5},{value:"Java Packages",id:"java-packages",level:5},{value:"NodeJS Packages",id:"nodejs-packages",level:5},{value:"1.2 Container Images",id:"12-container-images",level:4},{value:"Public Containers",id:"public-containers",level:5},{value:"Private Containers",id:"private-containers",level:5},{value:"1.3 Test Data",id:"13-test-data",level:4},{value:"Small Datasets (<2GB)",id:"small-datasets-2gb",level:5},{value:"Medium Datasets (2GB-100GB)",id:"medium-datasets-2gb-100gb",level:5},{value:"Large Datasets (>100GB)",id:"large-datasets-100gb",level:5},{value:"2. Implement Naming Conventions",id:"2-implement-naming-conventions",level:3},{value:"2.1 Package Naming",id:"21-package-naming",level:4},{value:"2.2 Version Naming",id:"22-version-naming",level:4},{value:"3. Automate Publishing",id:"3-automate-publishing",level:3},{value:"3.1 GitHub Actions Workflow",id:"31-github-actions-workflow",level:4},{value:"3.2 Automated Testing Integration",id:"32-automated-testing-integration",level:4},{value:"4. Maintain Delivery Pipeline",id:"4-maintain-delivery-pipeline",level:3},{value:"5. 
GitHub Actions Workflow Example for PyPI Project Continuous Delivery",id:"5-github-actions-workflow-example-for-pypi-project-continuous-delivery",level:3},{value:"Frequently Asked Questions (FAQ)",id:"frequently-asked-questions-faq",level:2},{value:"Credits",id:"credits",level:2},{value:"Feedback and Contributions",id:"feedback-and-contributions",level:2}],u={toc:p},c="wrapper";function m(e){let{components:t,...r}=e;return(0,i.kt)(c,(0,a.Z)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("h1",{id:"continuous-delivery"},"Continuous Delivery"),(0,i.kt)("pre",{align:"center"},"A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices."),(0,i.kt)("h2",{id:"introduction"},"Introduction"),(0,i.kt)("h3",{id:"background"},"Background"),(0,i.kt)("p",null,"Continuous Delivery (CD) is the practice of automatically preparing code changes for production release, extending Continuous Integration (CI) to ensure that every validated change is always production-ready. This guide presents a simplified, practical approach to implementing CD through standardized repository selections, naming conventions, and automation."),(0,i.kt)("h3",{id:"approach"},"Approach"),(0,i.kt)("p",null,"Adopt a clear, four-step plan to implement Continuous Delivery effectively:"),(0,i.kt)("ol",null,(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Choose repositories"),"."),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Adopt standardized naming conventions"),"."),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Automate publishing"),"."),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("strong",{parentName:"li"},"Maintain the delivery pipeline"),".")),(0,i.kt)("mermaid",{value:"graph TD\n Repository[Choose Repositories] --\x3e Naming[Adopt Naming
Conventions] --\x3e Automate[Automate Publishing]\n Automate --\x3e Maintain[Maintain Pipeline]\n Maintain --\x3e Repository"}),(0,i.kt)("h3",{id:"key-use-cases"},"Key Use Cases"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"Auto-publishing built artifacts to package managers."),(0,i.kt)("li",{parentName:"ul"},"Standardizing naming conventions across repositories."),(0,i.kt)("li",{parentName:"ul"},"Versioning releases using semantic versioning."),(0,i.kt)("li",{parentName:"ul"},"Distributing test data automatically."),(0,i.kt)("li",{parentName:"ul"},"Automating container image publication."),(0,i.kt)("li",{parentName:"ul"},"Enabling infrastructure-as-code deployment.")),(0,i.kt)("h2",{id:"quick-start"},"Quick Start"),(0,i.kt)("p",null,"The most important step in setting up continuous delivery is choosing the right repositories and implementing proper naming conventions."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Key Concepts to Get Started:")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},(0,i.kt)("a",{parentName:"strong",href:"#package-repositories"},"\u2b07\ufe0f Choose a Package Repository"))," based on your artifact type:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"PyPI for Python packages"),(0,i.kt)("li",{parentName:"ul"},"Maven Central for Java"),(0,i.kt)("li",{parentName:"ul"},"NPM Registry for NodeJS"),(0,i.kt)("li",{parentName:"ul"},"ECR (Amazon Elastic Container Registry)/DockerHub for Containers")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"\ud83d\udcdd Implement ",(0,i.kt)("a",{parentName:"strong",href:"#naming-conventions"},"Standardized Naming Conventions"),":")),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"nasa-[project-org]-[module-name]")," for Python"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"gov.nasa.[project-org].[module-name]")," for 
Java"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"@nasa-[project-org]/[module-name]")," for NodeJS")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"\ud83d\ude80 Set up ",(0,i.kt)("a",{parentName:"strong",href:"#automated-publishing"},"Automated Publishing"))," using GitHub Actions"),(0,i.kt)("h2",{id:"step-by-step-guide"},"Step-by-Step Guide"),(0,i.kt)("h3",{id:"1-select-package-repositories"},"1. Select Package Repositories"),(0,i.kt)("p",null,"Choose appropriate repositories based on your artifact type:"),(0,i.kt)("h4",{id:"11-code-packages"},"1.1 Code Packages"),(0,i.kt)("h5",{id:"python-packages"},"Python Packages"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": PyPI"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Size Limit"),": 60MB"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Cost"),": Free"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Python libraries and tools"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Setup Steps"),":",(0,i.kt)("ol",{parentName:"li"},(0,i.kt)("li",{parentName:"ol"},"Create account on PyPI"),(0,i.kt)("li",{parentName:"ol"},"Set up project with ",(0,i.kt)("inlineCode",{parentName:"li"},"setup.py")," or ",(0,i.kt)("inlineCode",{parentName:"li"},"pyproject.toml")),(0,i.kt)("li",{parentName:"ol"},"Configure automated publishing")))),(0,i.kt)("h5",{id:"java-packages"},"Java Packages"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": Maven Central"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Size Limit"),": No specific limit"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Cost"),": Free"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Java libraries and 
frameworks"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Setup Steps"),":",(0,i.kt)("ol",{parentName:"li"},(0,i.kt)("li",{parentName:"ol"},"Create Sonatype account"),(0,i.kt)("li",{parentName:"ol"},"Configure Maven settings"),(0,i.kt)("li",{parentName:"ol"},"Set up GPG signing")))),(0,i.kt)("h5",{id:"nodejs-packages"},"NodeJS Packages"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": NPM Registry"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Size Limit"),": No specific limit"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Cost"),": Free"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": JavaScript/TypeScript packages"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Setup Steps"),":",(0,i.kt)("ol",{parentName:"li"},(0,i.kt)("li",{parentName:"ol"},"Create NPM account"),(0,i.kt)("li",{parentName:"ol"},"Configure package.json"),(0,i.kt)("li",{parentName:"ol"},"Set up automated publishing")))),(0,i.kt)("h4",{id:"12-container-images"},"1.2 Container Images"),(0,i.kt)("h5",{id:"public-containers"},"Public Containers"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": GitHub Packages/GitLab Registry"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Open source projects"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Limitations"),": Higher latency for runtime")),(0,i.kt)("h5",{id:"private-containers"},"Private Containers"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": Amazon ECR"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Production deployments"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Features"),": Low-latency pulls, private 
repos")),(0,i.kt)("h4",{id:"13-test-data"},"1.3 Test Data"),(0,i.kt)("h5",{id:"small-datasets-2gb"},"Small Datasets (<2GB)"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": GitHub/GitLab Releases"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Naming"),": ",(0,i.kt)("inlineCode",{parentName:"li"},"[project-org]-[project-module]-test-dataset")),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Unit test data, small samples")),(0,i.kt)("h5",{id:"medium-datasets-2gb-100gb"},"Medium Datasets (2GB-100GB)"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": Amazon S3"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Features"),": Pre-signed URLs, bandwidth control"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Integration test data")),(0,i.kt)("h5",{id:"large-datasets-100gb"},"Large Datasets (>100GB)"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Repository"),": EOSDIS DAAC (Earth data) or PDS (Planetary data)"),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("strong",{parentName:"li"},"Best For"),": Mission data, large-scale testing")),(0,i.kt)("h3",{id:"2-implement-naming-conventions"},"2. 
Implement Naming Conventions"),(0,i.kt)("h4",{id:"21-package-naming"},"2.1 Package Naming"),(0,i.kt)("p",null,"Follow standard naming conventions for each repository type:"),(0,i.kt)("mermaid",{value:"graph TD\n A[Package Type] --\x3e B{Language?}\n B --\x3e|Python| C[nasa-project-module]\n B --\x3e|Java| D[gov.nasa.project.module]\n B --\x3e|NodeJS| E[_at_nasa-project/module]"}),(0,i.kt)("h4",{id:"22-version-naming"},"2.2 Version Naming"),(0,i.kt)("p",null,"Use semantic versioning (MAJOR.MINOR.PATCH):"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"MAJOR: Breaking changes"),(0,i.kt)("li",{parentName:"ul"},"MINOR: New features, backward compatible"),(0,i.kt)("li",{parentName:"ul"},"PATCH: Bug fixes")),(0,i.kt)("h3",{id:"3-automate-publishing"},"3. Automate Publishing"),(0,i.kt)("h4",{id:"31-github-actions-workflow"},"3.1 GitHub Actions Workflow"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},"name: Publish Package\n\non:\n release:\n types: [published]\n\njobs:\n publish:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v3\n - name: Set up environment\n uses: actions/setup-python@v3\n with:\n python-version: '3.x'\n - name: Build and publish\n env:\n TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}\n TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}\n run: |\n python -m pip install build twine\n python -m build\n python -m twine upload dist/*\n")),(0,i.kt)("h4",{id:"32-automated-testing-integration"},"3.2 Automated Testing Integration"),(0,i.kt)("mermaid",{value:"graph TD\n A[Code Change] --\x3e B[Run Tests]\n B --\x3e C{Tests Pass?}\n C --\x3e|Yes| D[Build Artifact]\n C --\x3e|No| E[Fix Issues]\n D --\x3e F[Publish to Repository]"}),(0,i.kt)("h3",{id:"4-maintain-delivery-pipeline"},"4. 
Maintain Delivery Pipeline"),(0,i.kt)("p",null,"Regular maintenance tasks:"),(0,i.kt)("ol",null,(0,i.kt)("li",{parentName:"ol"},"Update repository credentials"),(0,i.kt)("li",{parentName:"ol"},"Monitor publishing success rates"),(0,i.kt)("li",{parentName:"ol"},"Verify artifact integrity"),(0,i.kt)("li",{parentName:"ol"},"Review and update workflows"),(0,i.kt)("li",{parentName:"ol"},"Clean up old artifacts")),(0,i.kt)("h3",{id:"5-github-actions-workflow-example-for-pypi-project-continuous-delivery"},"5. GitHub Actions Workflow Example for PyPI Project Continuous Delivery"),(0,i.kt)("p",null,"Create a ",(0,i.kt)("inlineCode",{parentName:"p"},".github/workflows/pypi-cd.yml")," file in your GitHub repository with the following ",(0,i.kt)("a",{target:"_blank",href:n(7712).Z},"content"),":"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},"name: Continuous Delivery for PyPI Project\n\non:\n push:\n branches:\n - main # Trigger on push to the 'main' branch\n tags:\n - 'v*.*.*' # Trigger on tags matching semantic versioning (v1.0.0)\n\njobs:\n # Job to set up the environment, install dependencies, and publish to PyPI\n publish-to-pypi:\n runs-on: ubuntu-latest\n\n steps:\n - name: Checkout repository\n uses: actions/checkout@v3\n\n - name: Set up Python\n uses: actions/setup-python@v3\n with:\n python-version: '3.x' # Use a specific Python version, e.g., '3.8', '3.9', etc.\n\n - name: Install dependencies\n run: |\n python -m pip install --upgrade pip\n pip install build twine # Required for building and publishing to PyPI\n\n - name: Build the package\n run: |\n python -m build # This creates the distribution files under the 'dist' directory\n\n - name: Publish package to PyPI\n env:\n TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} # Store PyPI credentials as GitHub secrets\n TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}\n run: |\n python -m twine upload dist/* # Uploads the package to 
PyPI\n")),(0,i.kt)("h2",{id:"frequently-asked-questions-faq"},"Frequently Asked Questions (FAQ)"),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: How do I handle dependencies between packages?")),(0,i.kt)("p",null,"A: Use semantic versioning and dependency ranges to manage package relationships."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: What about handling sensitive data in artifacts?")),(0,i.kt)("p",null,"A: Use private repositories and encrypted secrets in CI/CD pipelines."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: How often should artifacts be published?")),(0,i.kt)("p",null,"A: Publish on every tagged release for stable versions, and optionally for development versions."),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Q: How to manage large binary artifacts?")),(0,i.kt)("p",null,"A: Use specialized repositories like Amazon S3 for large artifacts and reference them in package metadata."),(0,i.kt)("h2",{id:"credits"},"Credits"),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Authorship"),":"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"https://github.com/yunks128"},"Kyongsik Yun"))),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Acknowledgements"),":"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"Thanks to the SLIM team for providing guidance")),(0,i.kt)("h2",{id:"feedback-and-contributions"},"Feedback and Contributions"),(0,i.kt)("p",null,"We welcome feedback and contributions to help improve and grow this page. 
Please see our ",(0,i.kt)("a",{parentName:"p",href:"https://nasa-ammos.github.io/slim/docs/contribute/contributing/"},"contribution guidelines"),"."))}m.isMDXComponent=!0},7712:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/files/pypi-cd-template-cac7a7a0d3f5806ee6451c68d396f70d.yml"}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.ea2549ce.js b/assets/js/runtime~main.22af30d0.js similarity index 99% rename from assets/js/runtime~main.ea2549ce.js rename to assets/js/runtime~main.22af30d0.js index 87164040..60a759f7 100644 --- a/assets/js/runtime~main.ea2549ce.js +++ b/assets/js/runtime~main.22af30d0.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,c,f,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,r),c.loaded=!0,c.exports}r.m=b,r.c=t,e=[],r.O=(a,c,f,d)=>{if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](c[o])))?c.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},c=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var c in 
a)r.o(a,c)&&!r.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,c)=>(r.f[c](e,a),a)),[])),r.u=e=>"assets/js/"+({46:"ca705c4a",53:"935f2afb",150:"32de2db0",292:"1937bb63",380:"22251b09",493:"ed7f1a16",913:"3e49c4e2",948:"8717b14a",1028:"51785cc5",1178:"80ba3087",1757:"cd95f078",1914:"d9f32620",2031:"8c4308b7",2052:"69878e35",2081:"73e4d764",2119:"c612d4a7",2213:"39680a00",2267:"59362658",2362:"e273c56f",2535:"814f3328",2774:"6d6ba527",3021:"c36b7e6b",3085:"1f391b9e",3089:"a6aa9e1f",3514:"73664a40",3608:"9e4087bc",3689:"c441f7d5",3842:"4317fbf7",4013:"01a85c17",4024:"6fb1fe71",4081:"719b1851",4103:"684aec4d",4173:"3b327884",4195:"c4f5d8e4",4227:"d5decc06",4555:"e0d5b996",4642:"b741a796",5178:"d07f8614",5242:"de1ef56c",5288:"4c9dd493",5307:"f1e6a692",5620:"e9025b0e",5680:"c2f4afbb",6004:"a729c412",6025:"410931d6",6103:"ccc49370",6104:"d698a8c5",6189:"91db1580",6677:"2f60f69a",7170:"cb227d72",7275:"b4deb335",7414:"393be207",7890:"51f4fcc3",7918:"17896441",7972:"c2d7cd96",8361:"3cc55dcd",8400:"21821687",8558:"1fb8531c",8610:"6875c492",8636:"f4f34a3a",8671:"325c7275",8753:"30749e1f",8780:"8facacb8",8793:"403f595d",9003:"925b3f96",9278:"cdace248",9315:"abf58fe5",9343:"f3d1369a",9402:"64fc4b83",9510:"0ee94099",9514:"1be78505",9642:"7661071f",9721:"3b13db8a",9817:"14eb3368",9959:"f25b6166"}[e]||e)+"."+{46:"42cbb196",53:"e416f1ba",150:"1f5d068d",292:"c01fcc43",380:"d8a93ec4",487:"884cc7e0",493:"6470d80e",913:"d8ecb7ca",948:"914d35ec",1028:"02a1a2ea",1178:"77bdf4ed",1757:"88e6a440",1914:"8aa602a6",2031:"98e99271",2052:"f01ce896",2081:"add750cf",2119:"d79501aa",2213:"7d42604e",2267:"4445c936",2362:"0e41ec1f",2529:"ba6531e8",2535:"743a3596",2774:"f7552ece",3021:"a70b55fd",3085:"f199e4b5",3089:"a59b6240",3473:"28b2823f",3514:"a2ef6c76",3608:"fd66296e",3689:"fb308300",3842:"7ffbce29",4013:"298b1a20",4024:"1edfdccc",4081:"5489bd4c",4103:"f84e9873",4173:"4c384568",4195:"2974567a",4227:"699dc446",4555:"66988315",4642:"
84986f00",4972:"71e74212",5178:"7b874a4b",5242:"b3771ba9",5288:"791558f2",5307:"1a0ccef4",5620:"a495e7a8",5680:"0277c7b0",6004:"8bb585dc",6025:"aaee0d12",6103:"a04a9b88",6104:"4db76e32",6189:"569a35fa",6316:"e253b8dd",6677:"047f220f",7170:"4bffee7f",7275:"34b93164",7414:"2b6bec03",7724:"1ce4c8a5",7890:"2e8be81b",7918:"63391c41",7972:"9df3316b",8361:"cf18e6da",8400:"20af5622",8558:"6ee91e0a",8610:"7dbfa267",8636:"ec4f7a24",8671:"ca7f10df",8753:"a89520b2",8780:"86d1476c",8793:"fbcc0f20",9003:"39c96a76",9278:"98c4f9dd",9315:"edda825d",9343:"2f33979c",9402:"8763c510",9487:"1902e0d9",9510:"d97cafc7",9514:"8f5a31ba",9620:"b536c143",9642:"4eb2b4ef",9721:"9606fcc4",9817:"7e910f19",9959:"c6d206b1"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),f={},d="docusaurus:",r.l=(e,a,c,b)=>{if(f[e])f[e].push(a);else{var t,o;if(void 0!==c)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(c))),a)return a(c)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/slim/",r.gca=function(e){return 
e={17896441:"7918",21821687:"8400",59362658:"2267",ca705c4a:"46","935f2afb":"53","32de2db0":"150","1937bb63":"292","22251b09":"380",ed7f1a16:"493","3e49c4e2":"913","8717b14a":"948","51785cc5":"1028","80ba3087":"1178",cd95f078:"1757",d9f32620:"1914","8c4308b7":"2031","69878e35":"2052","73e4d764":"2081",c612d4a7:"2119","39680a00":"2213",e273c56f:"2362","814f3328":"2535","6d6ba527":"2774",c36b7e6b:"3021","1f391b9e":"3085",a6aa9e1f:"3089","73664a40":"3514","9e4087bc":"3608",c441f7d5:"3689","4317fbf7":"3842","01a85c17":"4013","6fb1fe71":"4024","719b1851":"4081","684aec4d":"4103","3b327884":"4173",c4f5d8e4:"4195",d5decc06:"4227",e0d5b996:"4555",b741a796:"4642",d07f8614:"5178",de1ef56c:"5242","4c9dd493":"5288",f1e6a692:"5307",e9025b0e:"5620",c2f4afbb:"5680",a729c412:"6004","410931d6":"6025",ccc49370:"6103",d698a8c5:"6104","91db1580":"6189","2f60f69a":"6677",cb227d72:"7170",b4deb335:"7275","393be207":"7414","51f4fcc3":"7890",c2d7cd96:"7972","3cc55dcd":"8361","1fb8531c":"8558","6875c492":"8610",f4f34a3a:"8636","325c7275":"8671","30749e1f":"8753","8facacb8":"8780","403f595d":"8793","925b3f96":"9003",cdace248:"9278",abf58fe5:"9315",f3d1369a:"9343","64fc4b83":"9402","0ee94099":"9510","1be78505":"9514","7661071f":"9642","3b13db8a":"9721","14eb3368":"9817",f25b6166:"9959"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,c)=>{var f=r.o(e,a)?e[a]:void 0;if(0!==f)if(f)c.push(f[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((c,d)=>f=e[a]=[c,d]));c.push(f[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(c=>{if(r.o(e,a)&&(0!==(f=e[a])&&(e[a]=void 0),f)){var d=c&&("load"===c.type?"missing":c.type),b=c&&c.target&&c.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,f[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,c)=>{var f,d,b=c[0],t=c[1],o=c[2],n=0;if(b.some((a=>0!==e[a]))){for(f in t)r.o(t,f)&&(r.m[f]=t[f]);if(o)var i=o(r)}for(a&&a(c);n{"use strict";var e,a,c,f,d,b={},t={};function r(e){var 
a=t[e];if(void 0!==a)return a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,r),c.loaded=!0,c.exports}r.m=b,r.c=t,e=[],r.O=(a,c,f,d)=>{if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](c[o])))?c.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},c=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var c in a)r.o(a,c)&&!r.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,c)=>(r.f[c](e,a),a)),[])),r.u=e=>"assets/js/"+({46:"ca705c4a",53:"935f2afb",150:"32de2db0",292:"1937bb63",380:"22251b09",493:"ed7f1a16",913:"3e49c4e2",948:"8717b14a",1028:"51785cc5",1178:"80ba3087",1757:"cd95f078",1914:"d9f32620",2031:"8c4308b7",2052:"69878e35",2081:"73e4d764",2119:"c612d4a7",2213:"39680a00",2267:"59362658",2362:"e273c56f",2535:"814f3328",2774:"6d6ba527",3021:"c36b7e6b",3085:"1f391b9e",3089:"a6aa9e1f",3514:"73664a40",3608:"9e4087bc",3689:"c441f7d5",3842:"4317fbf7",4013:"01a85c17",4024:"6fb1fe71",4081:"719b1851",4103:"684aec4d",4173:"3b327884",4195:"c4f5d8e4",4227:"d5decc06",4555:"e0d5b996",4642:"b741a796",5178:"d07f8614",5242:"de1ef56c",5288:"4c9dd493",5307:"f1e6a692",5620:"e9025b0e",5680:"c2f4afbb",6004:"a729c412",6025:"410931d6",6103:"ccc49370",6104:"d698a8c5",6189:"91db1580",6677:"2f60f69a",7170:"cb227d72",7275:"b4deb335",7414:"393be207",7890:"51f4fcc3",7918:"17896441",7972:"c2d7cd96",8361:"3cc55dcd",8400:"21821687",8558:"1fb8531c",8610:"6875c492",8636:"f4f34a3a",8671:"325c7275",87
53:"30749e1f",8780:"8facacb8",8793:"403f595d",9003:"925b3f96",9278:"cdace248",9315:"abf58fe5",9343:"f3d1369a",9402:"64fc4b83",9510:"0ee94099",9514:"1be78505",9642:"7661071f",9721:"3b13db8a",9817:"14eb3368",9959:"f25b6166"}[e]||e)+"."+{46:"42cbb196",53:"e416f1ba",150:"1f5d068d",292:"c01fcc43",380:"d8a93ec4",487:"884cc7e0",493:"6470d80e",913:"d8ecb7ca",948:"914d35ec",1028:"02a1a2ea",1178:"77bdf4ed",1757:"88e6a440",1914:"8aa602a6",2031:"98e99271",2052:"f01ce896",2081:"add750cf",2119:"d79501aa",2213:"7d42604e",2267:"4445c936",2362:"0e41ec1f",2529:"ba6531e8",2535:"743a3596",2774:"f7552ece",3021:"a70b55fd",3085:"f199e4b5",3089:"a59b6240",3473:"28b2823f",3514:"a2ef6c76",3608:"fd66296e",3689:"fb308300",3842:"7ffbce29",4013:"298b1a20",4024:"1edfdccc",4081:"5489bd4c",4103:"f84e9873",4173:"4c384568",4195:"2974567a",4227:"699dc446",4555:"66988315",4642:"84986f00",4972:"71e74212",5178:"7b874a4b",5242:"b3771ba9",5288:"791558f2",5307:"1a0ccef4",5620:"a495e7a8",5680:"0277c7b0",6004:"8bb585dc",6025:"52466794",6103:"a04a9b88",6104:"4db76e32",6189:"569a35fa",6316:"e253b8dd",6677:"047f220f",7170:"4bffee7f",7275:"34b93164",7414:"2b6bec03",7724:"1ce4c8a5",7890:"2e8be81b",7918:"63391c41",7972:"9df3316b",8361:"cf18e6da",8400:"20af5622",8558:"6ee91e0a",8610:"7dbfa267",8636:"ec4f7a24",8671:"ca7f10df",8753:"a89520b2",8780:"86d1476c",8793:"fbcc0f20",9003:"39c96a76",9278:"98c4f9dd",9315:"edda825d",9343:"2f33979c",9402:"8763c510",9487:"1902e0d9",9510:"d97cafc7",9514:"8f5a31ba",9620:"b536c143",9642:"4eb2b4ef",9721:"9606fcc4",9817:"7e910f19",9959:"c6d206b1"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),f={},d="docusaurus:",r.l=(e,a,c,b)=>{if(f[e])f[e].push(a);else{var t,o;if(void 0!==c)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=f[e];if(delete 
f[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(c))),a)return a(c)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/slim/",r.gca=function(e){return e={17896441:"7918",21821687:"8400",59362658:"2267",ca705c4a:"46","935f2afb":"53","32de2db0":"150","1937bb63":"292","22251b09":"380",ed7f1a16:"493","3e49c4e2":"913","8717b14a":"948","51785cc5":"1028","80ba3087":"1178",cd95f078:"1757",d9f32620:"1914","8c4308b7":"2031","69878e35":"2052","73e4d764":"2081",c612d4a7:"2119","39680a00":"2213",e273c56f:"2362","814f3328":"2535","6d6ba527":"2774",c36b7e6b:"3021","1f391b9e":"3085",a6aa9e1f:"3089","73664a40":"3514","9e4087bc":"3608",c441f7d5:"3689","4317fbf7":"3842","01a85c17":"4013","6fb1fe71":"4024","719b1851":"4081","684aec4d":"4103","3b327884":"4173",c4f5d8e4:"4195",d5decc06:"4227",e0d5b996:"4555",b741a796:"4642",d07f8614:"5178",de1ef56c:"5242","4c9dd493":"5288",f1e6a692:"5307",e9025b0e:"5620",c2f4afbb:"5680",a729c412:"6004","410931d6":"6025",ccc49370:"6103",d698a8c5:"6104","91db1580":"6189","2f60f69a":"6677",cb227d72:"7170",b4deb335:"7275","393be207":"7414","51f4fcc3":"7890",c2d7cd96:"7972","3cc55dcd":"8361","1fb8531c":"8558","6875c492":"8610",f4f34a3a:"8636","325c7275":"8671","30749e1f":"8753","8facacb8":"8780","403f595d":"8793","925b3f96":"9003",cdace248:"9278",abf58fe5:"9315",f3d1369a:"9343","64fc4b83":"9402","0ee94099":"9510","1be78505":"9514","7661071f":"9642","3b13db8a":"9721","14eb3368":"9817",f25b6166:"9959"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,c)=>{var f=r.o(e,a)?e[a]:void 0;if(0!==f)if(f)c.push(f[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((c,d)=>f=e[a]=[c,d]));c.push(f[2]=d);var b=r.p+r.u(a),t=new 
Error;r.l(b,(c=>{if(r.o(e,a)&&(0!==(f=e[a])&&(e[a]=void 0),f)){var d=c&&("load"===c.type?"missing":c.type),b=c&&c.target&&c.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,f[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,c)=>{var f,d,b=c[0],t=c[1],o=c[2],n=0;if(b.some((a=>0!==e[a]))){for(f in t)r.o(t,f)&&(r.m[f]=t[f]);if(o)var i=o(r)}for(a&&a(c);n - +
- + \ No newline at end of file diff --git a/blog/first-blog-post/index.html b/blog/first-blog-post/index.html index 2f086f1c..2202d5e0 100644 --- a/blog/first-blog-post/index.html +++ b/blog/first-blog-post/index.html @@ -11,13 +11,13 @@ - +

First Blog Post

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 452dd63f..9246add8 100644 --- a/blog/index.html +++ b/blog/index.html @@ -11,13 +11,13 @@ - +

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/long-blog-post/index.html b/blog/long-blog-post/index.html index 0ac1aa0a..2fa086d1 100644 --- a/blog/long-blog-post/index.html +++ b/blog/long-blog-post/index.html @@ -11,13 +11,13 @@ - +

Long Blog Post

· 3 min read
Endilie Yacop Sucipto

This is the summary of a very long blog post,

Use a <!-- truncate --> comment to limit blog post size in the list view.

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/mdx-blog-post/index.html b/blog/mdx-blog-post/index.html index dbdd7bf4..7a3e334c 100644 --- a/blog/mdx-blog-post/index.html +++ b/blog/mdx-blog-post/index.html @@ -11,13 +11,13 @@ - +
- + \ No newline at end of file diff --git a/blog/tags/docusaurus/index.html b/blog/tags/docusaurus/index.html index e8a0ab97..77b4d95d 100644 --- a/blog/tags/docusaurus/index.html +++ b/blog/tags/docusaurus/index.html @@ -11,13 +11,13 @@ - +

4 posts tagged with "docusaurus"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/facebook/index.html b/blog/tags/facebook/index.html index ff471c46..e53c203d 100644 --- a/blog/tags/facebook/index.html +++ b/blog/tags/facebook/index.html @@ -11,13 +11,13 @@ - +

One post tagged with "facebook"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hello/index.html b/blog/tags/hello/index.html index 32eaadbf..c1d4521c 100644 --- a/blog/tags/hello/index.html +++ b/blog/tags/hello/index.html @@ -11,13 +11,13 @@ - +

2 posts tagged with "hello"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hola/index.html b/blog/tags/hola/index.html index 49c8fc9f..79aafa74 100644 --- a/blog/tags/hola/index.html +++ b/blog/tags/hola/index.html @@ -11,13 +11,13 @@ - +

One post tagged with "hola"

View All Tags

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/index.html b/blog/tags/index.html index 36aa2da6..4a094d8a 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -11,13 +11,13 @@ - +
- + \ No newline at end of file diff --git a/blog/welcome/index.html b/blog/welcome/index.html index ae3e2fcf..6e583009 100644 --- a/blog/welcome/index.html +++ b/blog/welcome/index.html @@ -11,13 +11,13 @@ - +

Welcome

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/docs/about/CODE_OF_CONDUCT/index.html b/docs/about/CODE_OF_CONDUCT/index.html index 646e9e35..c9d77bb4 100644 --- a/docs/about/CODE_OF_CONDUCT/index.html +++ b/docs/about/CODE_OF_CONDUCT/index.html @@ -11,7 +11,7 @@ - + @@ -65,7 +65,7 @@ Mozilla's code of conduct enforcement ladder.

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.

- + \ No newline at end of file diff --git a/docs/about/GOVERNANCE/index.html b/docs/about/GOVERNANCE/index.html index 45b68d85..ff5e9f69 100644 --- a/docs/about/GOVERNANCE/index.html +++ b/docs/about/GOVERNANCE/index.html @@ -11,13 +11,13 @@ - +

SLIM Project Governance

This governance model aims to create an open source community that encourages transparency, contributions, and collaboration, but maintains sound technical and quality standards. Our goal is to build a community comprised of members across the SLIM community and beyond, including from private organizations, universities, government organizations, and international organizations.

The project follows a fairly liberal contribution model where people and/or organizations who do the most work will have the most influence on project direction. Roles determine decision making influence, and governing committees (e.g. technical steering, project steering) are set up to ensure the project's direction is in-line with requirements / goals while supporting flexibility for future growth and membership. Technical decision making will primarily be made through a "consensus-seeking" approach within the respective governing committees.

Roles

RoleRestricted ToDescriptionRead/ClonePropose Pull RequestComment in Tickets / DiscussionsTriageReviewCommitTechnical DecisionsProject Decisions
UserNoneAnyone downloading, deploying, or operating the software to meet a specific objective.
ContributorNoneAnyone providing input to the project, including: code, issues, documentation, graphics, etc.
TriagerContributorSubset of contributors demonstrating a strong familiarity with the project.
CollaboratorContributorSubset of contributors granted write access to one or more of the project repositories upon selection by TSC
Technical Steering Committee MemberCollaboratorA subset of collaborators having technical decision making authority and admin privileges
Project Steering Committee MemberStakeholdersSponsor organization representatives (i.e. those providing funding to the project) and key stakeholders with authority to guide project based on requirements, budget, schedule, etc.
Product ManagerStakeholdersOverall manager of project with final authority over all key decisions when consensus cannot be reached

User

Anyone who has downloaded, deployed, or operated SLIM to meet a specific objective. This project was primarily designed for developing, iterating, and disseminating software process improvements, but let us know if you've found other uses for it.

Contributor

Contributors include anyone that provides input to the project. This includes code, issues, documentation, graphics, designs, or anything else that tangibly improves the project. We encourage you to start contributing right away by joining our Discussions or submitting an Issue.

Triager

Subset of contributors who have demonstrated a strong familiarity with the project and are regularly contributing to the project via issue creation, commenting, discussions, etc. Triagers are given specific permissions to do the following:

- Label issues and pull requests
- Comment, close, and reopen issues and pull requests

List of current Triagers

Collaborator

Subset of contributors who have been given write access to one or more project repositories. Both contributors and collaborators can propose changes to the project via pull requests, but only collaborators can formally review and approve (merge) these requests. Any contributor who has made a non-trivial contribution should be on-boarded as a collaborator in a timely manner.

If you are planning on making a substantial contribution to the project or feel as though you should be given write access to a repository, please send a request to https://github.com/riverma/

List of current collaborators

Technical Steering Committee Member

A subset of the collaborators forms the Technical Steering Committee (TSC). The TSC has authority over the following aspects of this project:

  • Technical direction and guidance
  • Committee governance and process
  • Contribution policy
  • Conduct guidelines
  • Maintaining the list of collaborators

TSC Committee Members

Emeriti

TSC Emeriti

Scope

The TSC is primarily responsible for the following project repositories:

However, the TSC also has the responsibility to interface with and monitor third-party dependencies of the project for key changes impacting SLIM. These third-party dependencies include:

  • N/A

Decision Making Process

Any community member can create an issue or comment asking the TSC to review something. Prior to implementing a substantial contribution, the design of that contribution should be reviewed by at least one member of the TSC. If consensus-seeking fails during the review of a pull request or in design discussions, the issue will be addressed by the TSC to make a determination on direction. TSC members will meet regularly and will keep track of decisions made when consensus was not met.

The TSC can nominate new members at any time. Candidates for membership tend to be collaborators who have shown great dedication to the project and/or experts in a particular domain or project component. TSC members are expected to be active contributors or members who have made significant contributions within the past 12 months.

Project Management Committee (PMC) Member

The Project Management Committee (PMC) is made up of sponsor organization representatives (i.e. those providing funding to the project) and key stakeholders who rely or plan to rely on the project to meet a critical need. The PMC has the following responsibilities:

  • Maintaining the overall project roadmap
  • Determining project requirements and commitments to sponsors and stakeholders
  • Assessing available resources and allocating them across the project
  • Monitoring and reporting on progress against the roadmap
  • On-boarding new sponsors and stakeholders
  • Overall project governance (including this policy)
  • Addressing any legal considerations

PMC Committee Members

Emeriti

PMC Emeriti

Scope

The PMC has management authority over the same project repositories over which the TSC has technical authority over.

Decision Making Process

The PMC will consist of a product manager and additional representative from sponsors and key stakeholders. The PMC or sponsoring organizations can nominate new members at any time. Care should be taken not to include too many members from a single stakeholder project or organization.

Regular stakeholder meetings are held to discuss current project status and propose changes to the project roadmap. If stakeholder representatives and sponsors concur with these proposals during the meeting, they will be immediately adopted. A member of the PMC will ensure the changes have been captured and recorded. Regular stakeholder meetings will be open to the entire community, but only members of the PMC have decision making authority.

Additional meetings may be held if consensus is not met or to discuss significant changes to the roadmap due to changes in stakeholder priorities or available resources. Any decision made outside of stakeholder meetings must still be approved by all sponsors and stakeholders. If full consensus cannot be made, the product manager has the final authority to determine project direction. Any non-concurrences from stakeholders or sponsors will be noted.

Product Manager

Overall manager of the project with final authority over all key decisions when consensus cannot be reached within the TSC or PSC. The product manager is often chosen at the onset of project initiation and can be reassigned by the PMC (with institutional approval, if applicable).

Acknowledgements

Much of this governance model was adapted from other notable open source projects including node.js, OpenSSL, PostgreSQL, and OpenMCT. We would like to thank those projects for setting the foundation upon which this model was built.

- + \ No newline at end of file diff --git a/docs/about/index.html b/docs/about/index.html index ddb4a245..9aeaddc4 100644 --- a/docs/about/index.html +++ b/docs/about/index.html @@ -11,13 +11,13 @@ - +

About

A shared resource for discussing, iterating and referencing best practices in software lifecycle process improvements for multi-mission space and ground software

📖 View our Infographic (PDF)

Software Lifecycle Improvement & Modernization (SLIM) is a project focused on collecting, developing, and disseminating best practices and process improvement strategies in NASA multi-mission software development lifecycle ecosystems. SLIM represents both a community of contributors as well as a continually evolving repository for best practices documentation.

SLIM best practice guides and recommendations are all open source, which means you have the freedom to use our work (in accordance with our permissive LICENSE) as well as contribute and help shape our future work. We're excited to welcome new contributors and users.

Our Focus

There are three key areas within the software development lifecycle improvement space that SLIM focuses on providing best practice guidance for:

SLIM-scope

Our Process

1. Ask Our Community

We reach out to community member projects, and solicit input on outstanding process improvement needs, including the respective needs' relative ranking in importance / criticality. A few needs are chosen to focus active contributor time upon - seeking to develop best practice guides, including items such as use case lists, trade-studies, reference architectures and starter kits. That being said, any external contributors are free to propose best practice guides to our project at any time in the form of a contribution. See our /slim/docs/contribute/submit-best-practice for more details.

You can see our current prioritized list of community-ranked best practice development ideas in our planning board.

2. Develop Standards & Best Practices

We use a technique we like to call "standards-as-code", which basically means that we develop best practices that are patchable to existing or new community member project codebases or are deployable to their infrastructure. Therefore, we target providing best practices in a way that can be most easily infused into existing SLIM community member projects. This way, we are actually able to scale out our best practice dissemination widely to many projects at once. For best practices that are not patchable to repositories, we ask our contributors to develop automation that can be run as a script or set of commands - and deploy these to SLIM community members through issue-tickets.

SLIM-dev-process

More information on our development process for best practice guides can be found in our Contributing Guide.

3. Publish Best Practices

As mentioned above, we operate under a "standards-as-code" philosophy, which means that newly developed standards and best practices are directly infused into member projects through pull requests or issue tickets. This hands-on approach ensures that improvements are easily infusable into the SLIM community member repositories.

To see the current adoption status and how these practices are being integrated into various SLIM community member projects, check out our leaderboard pages:

Our Community Members

What does it mean to be a community member of SLIM? The following ideas apply to community member projects:

  • Has a representative (a point-of-contact) who interfaces with the SLIM effort
  • Receives pull-requests / issues containing SLIM best practice recommendations
  • Openness and willingness to infuse SLIM best practices
  • Prioritizes contributing back to the SLIM project

The following list of public projects are currently SLIM community members:

NamePoint of Contact (POC)
Advanced Multi-Mission Operations System (AMMOS) Multimission Ground System and Services (MGSS) Instrument Data System (IDS)@PaulMRamirez
Hybrid Science Data System (HySDS)@hookhua
F' (FPrime)@LeStarch @thomas-bc
Observational Products for End-Users from Remote Sensing Analysis (OPERA)@riverma
Planetary Data System (PDS) Engineering Node (EN)@jordanpadams @nutjob4life
Unity Science Data System (SDS)@GodwinShen @mike-gangl @nutjob4life
- + \ No newline at end of file diff --git a/docs/category/-governance/index.html b/docs/category/-governance/index.html index 77d82194..d603bade 100644 --- a/docs/category/-governance/index.html +++ b/docs/category/-governance/index.html @@ -11,13 +11,13 @@ - +
- + \ No newline at end of file diff --git a/docs/category/-information-sharing/index.html b/docs/category/-information-sharing/index.html index db10c026..3d418f00 100644 --- a/docs/category/-information-sharing/index.html +++ b/docs/category/-information-sharing/index.html @@ -11,13 +11,13 @@ - + - + \ No newline at end of file diff --git a/docs/category/-software-lifecycle/index.html b/docs/category/-software-lifecycle/index.html index ffa1659e..85e4d5b7 100644 --- a/docs/category/-software-lifecycle/index.html +++ b/docs/category/-software-lifecycle/index.html @@ -11,13 +11,13 @@ - + - + \ No newline at end of file diff --git a/docs/category/application-starter-kits/index.html b/docs/category/application-starter-kits/index.html index 419d5d19..bbf46cfe 100644 --- a/docs/category/application-starter-kits/index.html +++ b/docs/category/application-starter-kits/index.html @@ -11,13 +11,13 @@ - + - + \ No newline at end of file diff --git a/docs/category/contributions/index.html b/docs/category/contributions/index.html index 868d733a..939b77ac 100644 --- a/docs/category/contributions/index.html +++ b/docs/category/contributions/index.html @@ -11,13 +11,13 @@ - + - + \ No newline at end of file diff --git a/docs/category/security/index.html b/docs/category/security/index.html index 0f5a05cf..a20912cd 100644 --- a/docs/category/security/index.html +++ b/docs/category/security/index.html @@ -11,13 +11,13 @@ - + - + \ No newline at end of file diff --git a/docs/contribute/contributing/development-process/index.html b/docs/contribute/contributing/development-process/index.html index 2a9fab16..5bcce12b 100644 --- a/docs/contribute/contributing/development-process/index.html +++ b/docs/contribute/contributing/development-process/index.html @@ -11,13 +11,13 @@ - +

Our Development Process

Our project integrates contributions from many people, and so we'd like to outline a process you can use to visualize how your contributions may be integrated if you provide something.

Fork our Repository

Forking our repository, as opposed to directly committing to a branch is the preferred way to propose changes.

See this GitHub guide on forking for information specific to GitHub.com

Find or File an Issue

Make sure people are aware you're working on a patch! Check out our issue tracking system and find an open issue you'd like to work against, or alternatively file a new issue and mention you're working on a patch.

Choose the Right Branch to Fork

Our project typically has the following branches available, make sure to fork either the default branch or a branch someone else already tagged with a particular issue ticket you're working with.

  • main - default branch, and contains mature SLIM deliverables and should be the typical branch you fork
  • gh-pages - live rendering of SLIM website, auto-generated by changes committed to main branch via a GitHub Action

Make your Modifications

Within your local development environment, this is the stage at which you'll propose your changes, and commit those changes back to version control. See the README.md or development guide for more specifics on what you'll need as prerequisites to setup your local development environment.

Commit Messages

Commit messages to version control should reference a ticket in their title / summary line:

Issue #248 - Show an example commit message title

This makes sure that tickets are updated on GitHub with references to commits that are related to them.

Commit should always be atomic. Keep solutions isolated whenever possible. Filler commits such as "clean up white space" or "fix typo" should be merged together before making a pull request, and significant sub-feature branches should be rebased to preserve commit history. Please ensure your commit history is clean and meaningful!

Additionally, remember to "Sign-Off" on your commits to align with our Developer Certificate of Origin (DCO) policy.

Submit a Pull Request

Pull requests are the core way our project will receive your patch contributions. Navigate to your branch on your own fork within the version control system, and submit a pull request or submit the patch text to our project.

Please make sure to provide a meaningful text description to your pull requests, whenever submitted. Our pull-request template will be auto-generated for you when you create your pull-request. See the template here.

Working on your first Pull Request? See guide: How to Contribute to an Open Source Project on GitHub

Reviewing your Pull Request

Reviewing pull-requests, or any kinds of proposed patch changes, is an art. That being said, we follow the following best practices:

  • Intent - is the purpose of your pull-request clearly stated?
  • Solution - is your pull-request doing what you want it to?
  • Correctness - is your pull-request doing what you want it to correctly?
  • Small Patches - is your patch of a level of complexity and brevity that it can actually be reviewed by a human being? Or is does it involve too much content for one pull request?
  • Coding best practices - are you following best practices in the coding / contribution language being used?
  • Readability - is your patch readable, and ultimately maintainable, by others?
  • Reproducibility - is your patch reproducible by others?
  • Tests - do you have or have conducted meaningful tests?
- + \ No newline at end of file diff --git a/docs/contribute/contributing/index.html b/docs/contribute/contributing/index.html index af330c94..9fa64b45 100644 --- a/docs/contribute/contributing/index.html +++ b/docs/contribute/contributing/index.html @@ -11,13 +11,13 @@ - +

Contributing 101

Thanks for taking the time to consider contributing! We very much appreciate your time and effort. This document outlines the many ways you can contribute to our project, and provides detailed guidance on best practices. We look forward to your help!

- + \ No newline at end of file diff --git a/docs/contribute/contributing/introduction/index.html b/docs/contribute/contributing/introduction/index.html index 283956ab..db47afba 100644 --- a/docs/contribute/contributing/introduction/index.html +++ b/docs/contribute/contributing/introduction/index.html @@ -11,13 +11,13 @@ - +

Introduction

SLIM is a joint, community-based effort at collective software process improvements. Therefore, we need your help! Here's the basics on contributing:

  • Anyone can contribute!
  • You can contribute at any time
  • You can contribute in any way (code, documentation, discussion topics, issue ticket discussions, etc.)

We plan out our work into fiscal quarters (i.e. Oct - Dec as Q1, Jan - Mar as Q2, etc.). This helps our stakeholders (the folks using our process improvements) plan to incorporate process improvements better into their schedules. See our Planning Board for our most up-to-date plan. Generally speaking, we try to get together with our stakeholders and our community contributors at the start of every fiscal quarter to plan out what we should work on. That being said, since we're an open source project, we also accept and encourage ad-hoc contributions at any time - just note it may take some time to review / decide whether to incorporate.

- + \ No newline at end of file diff --git a/docs/contribute/contributing/know-before-contribute/index.html b/docs/contribute/contributing/know-before-contribute/index.html index a9c2da27..14b4d203 100644 --- a/docs/contribute/contributing/know-before-contribute/index.html +++ b/docs/contribute/contributing/know-before-contribute/index.html @@ -11,13 +11,13 @@ - +

Know Before you Contribute

Before you begin contributing to our project, it'll be a good idea to ensure you've satisfied the below pre-requisites.

License

Our project has our licensing terms, including rules governing redistribution, documented in our LICENSE file. Please take a look at that file and ensure you understand the terms. This will impact how we, or others, use your contributions.

Code of Conduct

Our Code of Conduct helps facilitate a positive interaction environment for everyone involved with the team, and provides guidance on what to do if you experience problematic behavior. Read more in our CODE_OF_CONDUCT.md, and make sure you agree to its terms.

Developer Environment

For patch contributions, see our Developer Documentation (TBD) for more details on how to set up your local environment, to best contribute to our project.

At a minimum however to submit patches (if using Git), you'll want to ensure you have:

  1. An account on the Version Control System our project uses (i.e. GitHub).
  2. The Version Control System client (i.e. Git) installed on your local machine.
  3. The ability to edit, build, and test our project on your local machine. Again, see our README.md or detailed developer guide for more details

Communication Channels

Before contributing changes to our project, it's a great idea to be familiar with our communication channels and to socialize your potential contributions to get feedback early. This will help give you context for your contributions, no matter their form.

Our communication channels are:

  • Issue tracking system - a regularly monitored area to report issues with our software or propose changes
  • Discussion board - a permanently archived place to hold conversations related to our project, and to propose as well as show+tell topics to the contributor team. This resource can be searched for old discussions.
- + \ No newline at end of file diff --git a/docs/contribute/contributing/ways-to-contribute/index.html b/docs/contribute/contributing/ways-to-contribute/index.html index 725044a4..c71918be 100644 --- a/docs/contribute/contributing/ways-to-contribute/index.html +++ b/docs/contribute/contributing/ways-to-contribute/index.html @@ -11,13 +11,13 @@ - +

Ways to Contribute

📖 Best Practice Guides

Please see our Submit a Best Practice Guide for more details.

⚠️ Issue Tickets

Do you like to talk about new features, changes, requests?

Issue tickets are a very simple way to get involved in our project. It also helps new contributors get an understanding of the project more comprehensively. This is a great place to get started with the project if you're not sure where to start.

See our list of issues at: https://github.com/NASA-AMMOS/slim/issues

Cleaning up Duplicate Issues

Often we receive duplicate issues that can confuse project members on which issue ticket to hold conversations upon.

Here's how you can help:

  1. Scan the list of open issue tickets for duplicate titles, or internal wording
  2. If you find duplicates, copy / paste the below message on the conversation thread of the issue ticket that has fewer participants involved
This is a duplicate issue. Please migrate conversations over to [issue-XYZ](hyperlink to issue)

Good First Issues

Issue tickets can vary in complexity, and issues labeled with good first issue labels are often a great way to get started with the project as a newcomer.

Take a look at our issue tracking system, and filter by good first issue for issues that are low-complexity, and that will help you get familiar with our issue tracking and patch submission process.

Suggesting New Issue Labels

Labels within our issue tracking system are a great way to quickly sort through tickets. The project may not yet have labels to cover the full variety of issue tickets. Take a look through our list of issues, and if you notice a set of issue tickets that seem similar but are not categorized with an existing label, go ahead and submit a request within one of the issues you've looked at with the following text:

I've noticed several other issues that are of the same category as this issue. Shall we make a new label for these types of issues?

Submitting Bug Issues

Resolving bugs is a priority for our project. We welcome bug reports. However, please make sure to do the following prior to submitting a bug report:

  • Check for duplicates - there may be a bug report already describing your issue, so check the issue tracking system first.

Here's some guidance on submitting a bug issue:

  1. Navigate to our issue tracking system and file a new issue
  2. Select a bug template (if available) for your issue
    1. Fill out the template fields to the best of your ability, including output snippets or screenshots where applicable
  3. Follow the general guidelines below for extra information about your bug
    1. Include a code snippet if you have it showcasing the bug
    2. Provide reproducible steps of how to recreate the bug
    3. If the bug triggers an exception or error message, include the full message or stacktrace
    4. Provide information about your operating system and the version of our project you're using

📀 Media

Media, such as images, videos, sound files, etc., are an excellent way to explain documentation to a wider audience more easily. Include media in your contributions as often as possible.

When including media into our version-control system, it is recommended to use formats such as:

  • Diagrams: Mermaid format
  • Images: JPEG format
  • Videos: H264 MPEG format
  • Sounds: MP3 format

❓ Questions

Answering questions is an excellent way to learn more about our project, as well as get better known in our project community.

Here are just a few ways you can help answer questions for our project:

When answering questions, keep the following in mind:

  • Be polite and friendly. See our Code of Conduct recommendations as you interact with others in the team.
  • Repeat the specific question you are answering, followed by your suggestion.
  • If suggesting code, repeat the line of code that needs to be altered, followed by your alteration
  • Include any post-steps or checks to verify your answer can be reproduced

🎨 Design

Design files can help to guide new features and new areas of expansion for our project. We welcome these kinds of contributions.

Here are just a few ways you can help provide design recommendations for our project:

  • Create visual mockups or diagrams to increase usability of our project applications. This can apply to user interfaces, documentation structuring, or even code architecture diagrams.
  • Conduct user research to understand user needs better. Save your findings within spreadsheets that the project team / contributors can review.
  • Create art, such as logos or icons, to support the user experience for the project

Each of the above can be contributed directly to repository code, and you should use our development process to contribute your additions.

🎟️ Meetups

A great way to contribute towards our project goals is to socialize and encourage people to meet and learn more about each other. Consider ideas like:

  • Propose workshops or meetups regarding some topic within our project
  • Help point project contributors and community members to conferences and publications where they may socialize their unique innovations
  • Schedule in-person or virtual happy-hours to help create a more social atmosphere within the project community

For the above ideas, use our communication channels to propose get-togethers.

- + \ No newline at end of file diff --git a/docs/contribute/submit-best-practice/index.html b/docs/contribute/submit-best-practice/index.html index b968846a..5ac69111 100644 --- a/docs/contribute/submit-best-practice/index.html +++ b/docs/contribute/submit-best-practice/index.html @@ -11,13 +11,13 @@ - +

Submit a Best Practice Guide

Are you interested in submitting a best practice guide to the SLIM project? You've come to the right place!

Below, we outline the important steps involved - including things like making a ticket, making your contribution, and reviewing / disseminating it.

See Our Process for a high-level overview of how SLIM best practice guides are developed. We use the open source contribution model to ideate, develop, review, and disseminate best practice guides and standards.

1⃣️ Find or Make a Ticket

The first step in making a contribution to SLIM is to make or select a ticket. This is important so that you don't inadvertently duplicate work that's already done / being done and help communicate your idea with the community. Also, not every best practice solution is appropriate for SLIM; therefore, it's always a good idea to talk to the community first before you make a pull request.

To create an issue for the NASA-AMMOS/slim repository:

  1. Go to NASA-AMMOS/slim on GitHub
  2. Click on the "Issues" tab
  3. Select "New issue" > appropriate template (e.g., "New Best Practice Guide", "Improve an Existing Best Practice Guide", or "New Process Improvement Need")
    • NOTE: the "New Process Improvement Need" ticket type is intended for documenting needs only, rather than solution ideas.
  4. Enter a concise title and fill out the template
  5. Add labels and screenshots as relevant
  6. Review and click on "Submit new issue"
  7. Engage in discussions on the ticket if needed

Follow GitHub best practices: be clear and concise.

2⃣️ Initialize a Draft Pull Request

To submit your solution to the NASA-AMMOS/slim repository follow the below steps. Note: we highly recommend iterating a draft pull request rather than issuing a pull request after you've already written up a guide - the SLIM community can provide much better feedback as you iterate! We also recommend making a fork for the SLIM repository to ensure you can demo your guide easily from your own GitHub account when the guide is ready.

  1. Fork NASA-AMMOS/slim
  2. Clone your fork (git clone https://github.com/YourUsername/slim.git) on your local machine
  3. Use the main branch on your fork - this allows you to host a copy of, and demo the SLIM website at https://<your-username>.github.io/slim
  4. Develop, iterate and commit your solution as it grows (see Step 3 below Develop Your Contribution)
  5. Within your fork, click the "Contribute" button, choose to submit a pull request and fill in details and reference any related issues. Consider keeping your pull request a draft pull request while you iterate so the community knows you're still working on the contribution.
  6. Address review feedback promptly and iterate as needed. Once your pull request is finalized, ensure it is no longer in "draft" mode. A SLIM collaborator and reviewer will help you finalize and get your contribution merged if possible.
  7. Keep your fork synchronized with the original repo using git fetch upstream and git merge upstream/main

3⃣️ Develop Your Contribution

When developing your contribution to the NASA-AMMOS/slim repository, consider the following sub-sections:

Think about Automation

To make it easier for users to adopt your best practice solution, consider presenting it as templates, software automation, or starter kits. This approach allows users to quickly implement your best practice in production. By providing automated tools or templates, you can streamline the adoption process and increase the likelihood of successful implementation.

Read more about our automation philosophy in Develop Standards & Best Practices.

Adhere to Folder Structure

To maintain organization and consistency within the repository, create a new folder for your best practice guide. Place this folder within the appropriate sub-folder in the docs/guides directory of the NASA-AMMOS/slim repository. Ensure that the folder structure aligns with the type of best practice you are developing, allowing users to easily locate and reference your guide.

Additionally, if your best practice guide includes infusible assets such as templates, code, or any other kind of automation that helps integrate your best practice into projects, you should store these assets within the /static/assets directory. Within this directory, create a sub-category that reflects the nature of the assets. This organization allows users to find and utilize the assets more easily, and it aligns with the repository's structure.

For example, if you were to add a new best practice guide related to software-lifecycle security, you'd create a new folder called "my-security-guide" in the below directory:

docs/
├── about
├── contribute
└── guides
├── documentation
├── search.md
└── software-lifecycle
├── application-starter-kits
├── continuous-integration
├── continuous-testing
└── security
├── README.md
├── dependabot
├── secrets-detection
└── my-security-guide
└── README.md
└── other-file.txt
└── other-file.json
└── other-file.jpg

And for infusible assets related to this guide, you would store them in a structure like this:

static/
├── assets
│ └── software-lifecycle
│ └── security
│ ├── my-security-guide-template.md
│ ├── my-security-script.sh
│ └── my-security-tool-config.json

Use our Standard Guide Template

To maintain uniformity and ease of understanding, contributors are urged to utilize the below template when submitting their best practice guides. This structured format ensures clarity, beginning with a concise title and a one-liner description to capture the essence of the proposal. The Introduction sets the context, Prerequisites identify essential tools or knowledge, and the Step-by-Step Guide offers a methodical walkthrough, enhanced optionally with illustrative images. An FAQ section addresses potential queries, and appropriate credits acknowledge contributors and inspirations. This standard layout not only ensures that each guide retains consistency and comprehensibility, but it also facilitates smoother automation and integration within the SLIM ecosystem.

Directions:

  • Create a new Markdown file for your guide (sample below) and call it README.md. It's advised to create a new folder for your best practice, where you can include multiple files, templates, and miscellaneous files if needed. See the above "Adhere to Folder Structure" section.
  • Copy/paste the below template into the file
  • Fill in the guide with your contents
# _Title Goes Here_

<pre align="center">One sentence description of your best practice solution.</pre>

## Introduction

**Background**: _A longer description of the problem you aim to solve and your solution. Talk about the background of why this is needed, what kind of benefits the user might enjoy, and any other background information that may be useful for the reader. Additionally, talk about the expected outcome the user can expect in implementing your solution._

**Use Cases**:
- _A list of the types of use cases where your process improvement solution will shine_
- _e.g. Making code repository READMEs consistent for internal and external contributors_

---

## Prerequisites
_List any software, hardware, or skills required to utilize the solution._

* Prerequisite 1
* Prerequisite 2
* ...

---

## Quick Start
**[Link to Process Improvement Solution (template/code sample/tool/etc.)](#)**

_A brief description of what the link provides, e.g., "Click the link above to access the full template for the README.md file."_

---

## Step-by-Step Guide

1. **Step 1**: _Brief description of the step._
![Optional Image for Step 1](imageURL_for_step1)

2. **Step 2**: _Brief description of the step._
![Optional Image for Step 2](imageURL_for_step2)

3. ...

---

## Frequently Asked Questions (FAQ)

- Q: Example question relevant to this guide
- A: Example answer to the question

---

## Credits

**Authorship**:
- _List of contributing authors of this write-up who actually wrote words. Link to GitHub profiles if available, e.g. [Bugs Bunny](https://www.github.com/bbuny573429)_

**Acknowledgements**:
* Source/Organization 1 that inspired the solution or was adapted from
* Source/Organization 2 that inspired the solution or was adapted from
* ...

---

## Feedback and Contributions

We welcome feedback and contributions to help improve and grow this page. Please see our [contribution guidelines](https://nasa-ammos.github.io/slim/docs/contribute/contributing/).

Add Entry to the Registry

To document metadata about your best practice and ensure that it appears in our search page, you need to add a JSON entry to the data/slim-registry.json file within the NASA-AMMOS/slim repository. Here's how you can do it:

  1. Basic Fields: Start by filling out the basic fields as shown in the example below. These include title, uri, category, description, tags, and last-updated.

  2. Assets Metadata: Additionally, you should include an assets metadata element to describe any infusible assets associated with your best practice. These could be templates, code samples, or other resources that enhance the usability of your guide. Each asset should include name, type, and uri to specify the asset’s details. Adding this is especially important so that infusion can be automated via the slim-cli tool - which queries the data/slim-registry.json file.

    Here’s an example of how to structure your JSON entry:

    {
    "title": "README.md",
    "uri": "/docs/guides/documentation/readme",
    "category": "documentation",
    "description": "A template that can be used to help developers and users understand your repository's project code concisely and clearly.",
    "tags": [
    "documentation",
    "repository-setup",
    "project-template"
    ],
    "last-updated": "2023-04-27",
    "assets": [
    {
    "name": "README Template",
    "type": "text/md",
    "uri": "https://raw.githubusercontent.com/NASA-AMMOS/slim/issue-154/static/assets/communication/readme/README.md"
    }
    ]
    }
  3. Customization: Tailor the fields to match the specifics of your best practice guide. Ensure that the assets section is comprehensive and includes all relevant resources that could help users implement your guide effectively.

  4. Final Check: Before submitting, review the JSON entry to ensure all details are accurate and that the assets are correctly linked and described.

4⃣️ Get Feedback For Your Contribution

Once you've created a contribution you're happy about, it's important to gather feedback from the SLIM community. This will help ensure that your contribution aligns with the project's standards and meets the community's needs.

First ensure your pull-request is ready for review:

  1. Ensure your contribution is viewable via https://<your-username>.github.io/slim. You may need to visit your Settings page for the repository and the "Pages" sub-section to configure GitHub Pages hosting. This should be automatically set up for you, but if you need help troubleshooting please visit Docusaurus' Deploying to GitHub Pages.
  2. Within your fork (created in step 2 above) on GitHub.com, click the "Contribute" button. Choose to submit a pull request and fill in details and reference any related issues. Provide a link to the live, demo website guide page on your fork!

It's now important to request feedback from the community. To request feedback, you can do the following:

  • SLIM Reviewers: Tag @slim-community or @slim-committers in your pull request for feedback for faster responses.

  • Slack/E-mail/Discussions: Post a message in the Slack channels, over e-mail, or other mediums. Use the provided template below as a guide for your message. Don't forget to include a link to your pull request or issue for easy reference.

    As part of the Software Lifecycle Improvement & Modernization (SLIM) project, we’ve recently created a pull request (PR) introducing best practices for [INSERT GUIDE NAME HERE]. The goal is to [INSERT BRIEF GOAL HERE].

    **Purpose of this Message:**
    I'm reaching out to you all to get your thoughts on [INSERT GUIDE NAME HERE] for the SLIM project.
    The goal for the guide is to [INSERT BRIEF GOAL STATEMENT HERE].

    **Proposal Details:**
    Pull Request: [INSERT PULL REQUEST URL HERE]
    Live Demo Here: [INSERT URL TO YOUR LIVE DEMO OF THE PROPOSED GUIDE (USUALLY ON YOUR GITHUB FORK)]

    **Your Input Matters:**
    Your comments and insights would be extremely valuable. Please consider adding a comment to the PR about your thoughts!
    Thank you in advance for your support and feedback!

Feedback from the community is crucial for the refinement of your contribution and ensures its successful integration into the SLIM project.

5⃣️ Merge Your Contribution

The final step in the contribution process involves the review and potential merging of your pull request by SLIM committers and reviewers. This process includes:

  1. Review by SLIM Committers and Reviewers: Your pull request will be thoroughly reviewed by the project's committers and reviewers. They will provide feedback, suggest improvements, or approve the changes.

  2. Iterate as Required: Based on the feedback, you may need to make further adjustments to your contribution. Promptly addressing these suggestions is crucial for the progression of your pull request.

  3. Final Decision: Once your pull request meets all the criteria and standards of the SLIM project, the committers will decide to merge your contribution into the main branch. Alternatively, they might request additional changes if needed.

This process ensures that every contribution is in line with the project's goals, standards, and quality expectations, contributing to the overall excellence and reliability of the SLIM project.

- + \ No newline at end of file diff --git a/docs/guides/checklist/index.html b/docs/guides/checklist/index.html index eeec91d4..b867470b 100644 --- a/docs/guides/checklist/index.html +++ b/docs/guides/checklist/index.html @@ -11,7 +11,7 @@ - + @@ -19,7 +19,7 @@

✅ Getting Started

We have numerous guides and recommendations on this website. You are free to explore and take what you find most helpful. However, if you're looking for more guidance on how to bring together SLIM's recommendations for your project in an aggregated form, you've come to the right place.

Checklist

Here we present a checklist you can run through for your project, as well as links to automation / guides to make your project ready with the best of SLIM.

Review your project's repositories, and ensure all have the following (in prioritized order):

| Checklist Item | Why? |
| --- | --- |
| ✅ LICENSE | Spell out legal terms for software usage & modification |
| README | Provide project overview, setup, & usage instructions |
| Contributing Guide | Outline how to contribute & the process for submissions |
| Code of Conduct | Establish community behavior standards |
| Issue Templates | Streamline issue reporting for consistency & clarity |
| Pull Request Templates | Ensure PRs are comprehensive & adhere to project standards |
| Documentation | Offer detailed info on features, APIs, & customizations not covered in README |
| Change Log | Keep track of all notable changes in each version in a human-readable format |
| Security & Dependency Scanning | Identify vulnerabilities & outdated dependencies for security |
| Sensitive Information Scanning | Detect accidental commits of sensitive info |
| Governance Model | Define decision-making structure & project leadership for transparency & organization |
| Software Metrics | Collect metrics on performance of your software's governance and lifecycle |

Repository Starter Kit

If you're starting a new project / repository, you can automatically get many of the above set up quickly using our:

🌐 Repository Starter Kit

There are two ways to use the Repository Starter Kit:

Using our Template Repository

  1. Navigate to the Starter Kit: Go to the SLIM Repository Starter Kit on GitHub.
  2. Create Your Repository: Click the "Use this template" button at the top right of the GitHub repository page. GitHub Use Template Button Example screenshot of button to click.
  3. Set Up Your New Repository: Fill in the Repository name and description for your new project. Decide whether your repository will be public or private. Click "Create repository from template".
  4. Customize Your Repository: After creation, go through your new repository and look for files containing the [INSERT ...] text. These are placeholders for you to replace with information specific to your project. This includes project name, description, contributing guidelines, etc.
  5. Push Changes: Once you've made your customizations, commit and push the changes to your repository to ensure all your project information is up to date.

Manually Cloning and Using the Template

If you prefer to set up your project manually or need more control over the initial setup, follow these steps:

  1. Clone the Starter Kit:

    • Open your terminal or command prompt.
    • Navigate to the directory where you want your project to be.
    • Clone the repository using:
      git clone https://github.com/nasa-ammos/slim-starterkit.git YOUR_PROJECT_NAME
    • Replace YOUR_PROJECT_NAME with the desired name of your project folder.
  2. Navigate to Your Project Directory:

    cd YOUR_PROJECT_NAME
  3. Remove the Git History and start fresh:

    rm -rf .git
    git init
  4. Customize Your Project: Go through the cloned files and modify any [INSERT ...] placeholders with information relevant to your project. This step is crucial for tailoring the starter kit to your specific project needs.

  5. Initial Commit:

    • Add all the files to your new git repository:
      git add .
    • Commit the changes:
      git commit -m "Initial commit with SLIM Repository Starter Kit"
  6. Create a New Repository on GitHub (or your preferred Git hosting service) without initializing it with a README, .gitignore, or license since your project already contains these files.

  7. Link Your Local Repository to GitHub:

    • Follow the instructions provided by GitHub to push an existing repository from the command line, which will include:
      git remote add origin <repository-URL>
      git branch -M main
      git push -u origin main
  8. Push Your Changes: Ensure all your customizations and initial setup are pushed to your remote repository.

By following these steps, you'll have a new project set up with best practices in repository setup, including a LICENSE, README, contributing guide, and more. Remember to keep your project information up to date and regularly review the repository for any updates to the starter kit that might be beneficial to your project.

- + \ No newline at end of file diff --git a/docs/guides/documentation/change-log/index.html b/docs/guides/documentation/change-log/index.html index c314a9c3..173482c3 100644 --- a/docs/guides/documentation/change-log/index.html +++ b/docs/guides/documentation/change-log/index.html @@ -11,13 +11,13 @@ - +

Change Log

A guide for setting up a log to document software changes in a human-centric format.

changelog-screenshot-example

Example CHANGELOG.md template rendering

Introduction

Background: A change log is a vital tool for documenting significant changes in software over time in a format accessible to humans. It plays a critical role in conveying the evolution of software, including additions, deprecations, and removals. We feel a change log is especially good for noting feature changes, rather than focusing on developer-oriented commit changes. This guide outlines the best practices for maintaining a CHANGELOG.md file, complementing release pages and enhancing software distribution transparency.

Use Cases:

  • Documenting software changes for easy understanding and tracking for a broad audience.
  • Enhancing transparency in software development and release cycles.
  • Storing the history of significant changes independent of code hosts like GitHub.com

Prerequisites

  • Familiarity with semantic versioning and release cycles.
  • Basic knowledge of Markdown formatting.

Quick Start

⬇️ Keep a Changelog (see example)

Download a template for creating a human-readable change log for your software project.


Step-by-Step Guide

  1. Team Agreement: Discuss the importance of a change log with your team, emphasizing its value for transparency and communication.
  2. Creating the Change Log:
    • Start a CHANGELOG.md in your repository.
    • See demo use of the templates like Demo 1 or Demo 2 as a base.
    • Customize the file with your project's release information.
  3. Integrating with Project Documentation:
    • Link to the CHANGELOG.md from your project’s README.md to enhance visibility.

Frequently Asked Questions (FAQ)

  • Q: Why is a CHANGELOG.md crucial even if there's a GitHub auto-generated release changes page?
  • A: We think they are complementary. Releases are great for commit-level information; but changelogs are better suited to a broader audience. It also ensures future-proof accessibility of change information, especially for users who may not have access to the project's release page or if the software has changed hands. Moreover, it's meant to be feature-centric and designed for people to understand, rather than GitHub's commit-oriented change reports.

Credits

Authorship:

Acknowledgements:

  • This guide draws from the "Keep a Changelog" standard and examples from various open source projects.

Feedback and Contributions

Feedback and contributions are encouraged to refine this guide. See our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/documentation/documentation-hosts/index.html b/docs/guides/documentation/documentation-hosts/index.html index 2dae3dc0..f108806e 100644 --- a/docs/guides/documentation/documentation-hosts/index.html +++ b/docs/guides/documentation/documentation-hosts/index.html @@ -11,13 +11,13 @@ - +

Documentation Hosting

Guidance on selecting and implementing documentation hosting tools.

docs-screenshot-example

Example documentation hosts (no endorsement implied)

Introduction

Background: Choosing the right platform for hosting various types of documentation is crucial for project success. This guide explores use cases for different documentation types and recommends tools for hosting user, developer, admin, API docs, and more, aiding in informed decision-making for documentation management.

Use Cases:

  • Hosting user, admin, developer, and API documentation.
  • Selecting appropriate platforms for different documentation needs.
  • Ensuring accessible and maintainable documentation for a variety of audiences.

Prerequisites

  • Understanding of different types of documentation and their requirements.
  • Familiarity with various documentation hosting platforms and frameworks.

Quick Start

📔 Documentation Use Cases and Hosting Tools

See various use cases and then select corresponding tools for hosting different types of documentation.

📔 Trade Study on Hosting Frameworks

View a trade-study of documentation hosting tools that we recommend.


Step-by-Step Guide

  1. Identify Documentation Needs: Assess your project's documentation requirements based on use cases. For use case mapping, consult our Documentation Use Cases and Hosting Tools document.
  2. Choose Appropriate Tools: Refer to the Trade Study on Hosting Frameworks to select the right tools for your documentation type based on features.
  3. Implement and Maintain: Set up your chosen documentation platforms and ensure they are regularly updated and maintained.

Frequently Asked Questions (FAQ)

  • Q: How do I choose the right platform for hosting my project's documentation?
  • A: Consider the type of documentation (user, admin, developer, API), the audience, and the specific features offered by the hosting platforms.

Credits

Authorship:


Feedback and Contributions

Feedback and contributions are welcome to enhance this guide. Visit our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/documentation/documentation-hosts/trade-study-hostingdocs-user/index.html b/docs/guides/documentation/documentation-hosts/trade-study-hostingdocs-user/index.html index 69ded58e..97d29c7e 100644 --- a/docs/guides/documentation/documentation-hosts/trade-study-hostingdocs-user/index.html +++ b/docs/guides/documentation/documentation-hosts/trade-study-hostingdocs-user/index.html @@ -11,13 +11,13 @@ - +

Docs Hosting Trade Study

Tool NameLicensingRender TimeVCS Stored ContentSelf-HostingManaged HostingAPI ActionsGenerate API DocsWYSIWYG EditingMarkdown SupportEmbedded ContentHierarchical StructureTemplating SupportRolesComment SupportSearchImport From Other FormatsExport Other FormatsAnchor LinksDraft ContentVersion ControlInternationalizationFile UploadsDiagram EditingUsage AnalyticsExtension Support
GitBookFree for OSS / $$$FastYesNoYesYesYesYesYesYesYesYesYesYesYesYesYesYesYesYesYesYesNoYesNo
ConfluenceFree for OSS / $$SlowNoNoYesYesNo (only non-free plugins)YesPartial (for embedded content)YesYesYesYesYesYesYesYesYesYesYesNoYesNo (only non-free plugins)No (non-free plans only)Yes
ReadTheDocsFree (with ads)SlowYesNoYesYesNoNoYesYesYesYesYesNoYesLimitedYesYesNoYes including doc packagesYesNo (embed/links only)Yes (with plugins)YesYes
mdBookFree (Mozilla Public License 2.0)FastYesYesNo (static hosts only)NoNoNoYesNoYesYesNoNoYesNoNoYesNoYesNoNo (embed/links only)Yes (as a preprocessor)No (third-party only)Yes
DocusaurusFree MIT LicenseFastYesYesYes (with static site hosts like Vercel, Netlify)NoYes (with plugins)NoYesYesYesYesNoNoYesNoNo (but can be done with external tools)YesNoNoYesNo (embed/links only)Yes (with plugins)No (third-party only)Yes
DocsifyFree MIT LicenseFastYesYesYes (with static site hosts like GitHub Pages, Netlify)NoNoNo (Markdown-based)YesYesYesNoNoNoYes (with plugins)NoNoYesNoNoNoNo (embed/links only)Partial (with plugins)No (third-party only)Yes
- + \ No newline at end of file diff --git a/docs/guides/documentation/documentation-hosts/use-cases/index.html b/docs/guides/documentation/documentation-hosts/use-cases/index.html index 15adda74..278b4043 100644 --- a/docs/guides/documentation/documentation-hosts/use-cases/index.html +++ b/docs/guides/documentation/documentation-hosts/use-cases/index.html @@ -11,13 +11,13 @@ - +

Docs Use Cases

- + \ No newline at end of file diff --git a/docs/guides/documentation/readme/index.html b/docs/guides/documentation/readme/index.html index 04f3ae69..effb4ab1 100644 --- a/docs/guides/documentation/readme/index.html +++ b/docs/guides/documentation/readme/index.html @@ -11,13 +11,13 @@ - +

READMEs

A guide to constructing an effective and impressive README for your project.

readme-screenshot-example

Example README.md template rendering

Introduction

Background: A well-crafted README.md is essential for introducing your software repository. It serves as the first point of contact for developers and users, providing a concise and clear overview. This guide offers a template suitable for various software projects, aiding in the creation of effective README files.

Use Cases:

  • Creating an informative and welcoming introduction to your software project.
  • Standardizing README content across various types of software repositories.
  • Enhancing project understanding for contributors and users.
  • Answering questions in a FAQ setting
  • Guiding readers on licensing and who to contact for support

Prerequisites

  • Basic knowledge of Markdown formatting.
  • Understanding of your project's key features and usage.

Quick Start

⬇️ README Template (see example)

Download and fill-out our recommended README template to get started on crafting your project's introduction.


Step-by-Step Guide

  1. Discuss with Your Team: Ensure consensus on adopting the README Template.
  2. Customize the Template:
    • Copy the README Template into a README.md file in your repository.
    • Replace [INSERT ...] placeholders with your project's specifics.
  3. Integrate into Your Project:
    • Link to the CONTRIBUTING.md within your README.md for easy access.

Frequently Asked Questions (FAQ)

  • Q: What makes a README file effective?
  • A: Clarity, completeness, and relevance of information regarding the project's purpose, usage, and contribution process.

Credits

Authorship:

Acknowledgements:

  • Inspired by README best practices from NASA-AMMOS, ReactJS, VueJS, and Apache Kafka.

Feedback and Contributions

We welcome feedback and improvements to this template. See our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/governance/contributions/code-of-conduct/index.html b/docs/guides/governance/contributions/code-of-conduct/index.html index 38e2770f..9d59c48f 100644 --- a/docs/guides/governance/contributions/code-of-conduct/index.html +++ b/docs/guides/governance/contributions/code-of-conduct/index.html @@ -11,13 +11,13 @@ - +

Code of Conduct

A walkthrough on setting up a code-of-conduct policy for your project.

code-of-conduct-screenshot-example

Example CODE_OF_CONDUCT.md template rendering

Introduction

Background: A Code of Conduct is important in setting the standards for interaction within a project team. It promotes a positive community environment, addressing unacceptable behaviors and providing mechanisms for conflict resolution. In this guide, we'll help you bootstrap your project with a recommended Code of Conduct, notably the Contributor Covenant, which is widely recognized and adopted in open-source communities.

Use Cases:

  • Establishing a respectful and inclusive team culture.
  • Providing clear guidelines on acceptable behaviors and handling grievances.

Prerequisites

  • Understanding of community management and team dynamics.
  • Familiarity with Markdown for editing documentation.

Quick Start

⬇️ Contributor Covenant Template (see example)

Access the standard Contributor Covenant template for a robust Code of Conduct policy to use in your project.


Step-by-Step Guide

  1. Team Consultation: Discuss the adoption of the Contributor Covenant with your team, ensuring consensus.
  2. Setting Up the Document:
    • Create a CODE_OF_CONDUCT.md file in your repository.
    • Copy the Contributor Covenant template into this file.
    • Replace [INSERT CONTACT METHOD] with appropriate contact details for reporting issues.
  3. Integrate into Your Project:
    • Add the Contributor Covenant badge to your README.md for visibility and easy access:
      [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)

Frequently Asked Questions (FAQ)

  • Q: Why is a Code of Conduct important for projects?
  • A: It establishes a standard for behavior, promoting a safe and inclusive environment for collaboration.
  • Q: Can I customize the Contributor Covenant Code of Conduct template?
  • A: Yes! Especially if your project is managed in a unique way.
  • Q: What should be done if a team member violates the Code of Conduct?
  • A: Violations should be reported to the designated contact person or team. The matter should be handled confidentially and in accordance with the guidelines set forth in the Code of Conduct.
  • Q: How often should the Code of Conduct be reviewed or updated?
  • A: Regularly reviewing and updating the Code of Conduct ensures it stays relevant and effective. It's advisable to reassess it annually or when significant changes occur within the project or community.

Credits

Authorship:

Acknowledgements:


Feedback and Contributions

Your feedback and contributions are welcome to enhance this guide. See our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/governance/contributions/contributing-guide/index.html b/docs/guides/governance/contributions/contributing-guide/index.html index ac6aa9d4..9c9edb3b 100644 --- a/docs/guides/governance/contributions/contributing-guide/index.html +++ b/docs/guides/governance/contributions/contributing-guide/index.html @@ -11,14 +11,14 @@ - +

Contributing Guide

Fast track developing a contribution guide for your new contributors.

contributing-screenshot-example

Example CONTRIBUTING.md template rendering

Introduction

Background: A well-defined contribution guide is crucial for open-source projects. It helps new contributors understand the expectations and processes for contributing effectively. We walk you through developing a contribution guide for your project via our template, which sets clear standards for contributions and details a recommended process to follow.

Use Cases:

  • Guiding new contributors on making meaningful contributions.
  • Ensuring contributions align with project norms and requirements.
  • Facilitating a transparent and efficient contribution process.

Prerequisites

  • Understanding of basic project management and version control systems.
  • Familiarity with GitHub and Markdown formatting.

Quick Start

⬇️ Contributing Guide Template (see example)

Download our customizable template to create a contributing guide for your project.


Step-by-Step Guide

  1. Team Discussion: Collaborate with your team to decide on adopting a contribution guide template. This step is essential to establish project norms and contribution expectations. Our template touches the following topics that you'll want to consider:
    • License overview
    • Code of Conduct
    • Governance Model
    • Developer environment setup
    • Communication channels
    • How-to in interacting with the codebase
    • Pull requests
    • Ways to contribute (e.g. code, docs, media, etc.)
    • etc.
  2. Customize the Template: Modify the Contributing Guide Template to fit your project's specifics.
    • Copy the template and create a CONTRIBUTING.md file at the root level of your repository.
    • Replace [INSERT ...] placeholders with your project's details. These markers are used throughout to designate customization options.
  3. Integrate with Project:
    • Add a link to CONTRIBUTING.md in your project's README.md file under the Contributing section.
  4. Communicate:
    • Share the guide with existing contributors and all new contributors as their starting point.

Frequently Asked Questions (FAQ)

  • Q: Why should I even bother with a contributing guide?
  • A: Do you want to constantly re-explain your project's philosophy and contribution norms? If not - a contribution guide helps set clear expectations and processes, ensuring contributions are consistent and aligned with your project's goals.

Credits

Authorship:

Acknowledgements: This template is influenced by guidelines from the following:


Feedback and Contributions

Feedback and contributions are encouraged to refine this guide. Visit our contribution guidelines for more information.

- + \ No newline at end of file diff --git a/docs/guides/governance/contributions/issue-templates/index.html b/docs/guides/governance/contributions/issue-templates/index.html index aff84a98..96eb0cda 100644 --- a/docs/guides/governance/contributions/issue-templates/index.html +++ b/docs/guides/governance/contributions/issue-templates/index.html @@ -11,13 +11,13 @@ - +

Issue Templates

Make issue tracking clear and consistent with structured issue templates for your contributors.

issue-bug-screenshot-example

Example bug issue ticket template rendering

Introduction

Background: Implementing issue templates in GitHub projects standardizes and clarifies the submission of bug reports, feature requests, and other types of issues. It guides contributors in providing essential details, helping developers understand and address issues more efficiently.

Use Cases:

  • Enhancing clarity and consistency in issue or feature reporting.
  • Streamlining the process for contributors to report bugs or request features so that a wide audience can switch between issues easily.
  • Improving developers' understanding of issues for quicker resolution.

Prerequisites

  • Access to a GitHub repository with administrative permissions.
  • Basic knowledge of Markdown for editing GitHub templates.

Quick Start

Bug Reports:

New Features:

Resources:

📔 GitHub Issue Template Documentation


Step-by-Step Guide

  1. Team Discussion: Consult with your team about using GitHub issue templates. Reach a consensus on adopting this practice. Our suggested templates request the below information - see justification below.

    • Bug Reports:
      1. Checked for duplicates: This section asks the contributor to verify if the issue has already been reported. It helps prevent duplication and streamlines the issue management process.
      2. Describe the bug: The contributor provides a clear, concise description of the bug. This section is critical for developers to understand the issue's nature and impact.
      3. What did you expect?: Understanding the contributor's expectations clarifies the disparity between expected and actual behavior, helping to pinpoint the issue more accurately.
      4. Reproducible steps: Step-by-step reproduction instructions are crucial for developers to replicate the issue, diagnose the problem, and test solutions effectively.
      5. What is your environment?: Providing details about the hardware, operating system, or other contextual factors helps in identifying if the bug is environment-specific and aids in troubleshooting.
    • New Features:
      1. Checked for duplicates: This section ensures the contributor has checked for existing feature requests, avoiding redundancy and streamlining the development process.
      2. Alternatives considered: Encourages the contributor to consider and document alternative solutions. This insight can guide the evaluation of the feature's necessity.
      3. Related problems: This helps identify if the feature request is a solution to an existing problem, adding context and justification for the request.
      4. Describe the feature request: A clear description from the contributor about the proposed feature. This clarity is essential for understanding the feature's purpose and scope.
  2. Setting Up Issue Templates:

  3. Commit and Use Templates:

    • Commit these files to the main branch.
    • New issues in your repository will now offer these templates for contributors to fill.

Frequently Asked Questions (FAQ)

  • Q: Why are issue templates important in GitHub projects?
  • A: They ensure that all necessary information is provided, leading to more effective issue tracking and resolution.

Credits

Authorship:

Acknowledgements:

  • GitHub for providing documentation for issue templates.

Feedback and Contributions

Feedback and contributions are welcome to enhance these guidelines. See our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/governance/contributions/pull-requests/index.html b/docs/guides/governance/contributions/pull-requests/index.html index 6f854e8e..ca8156c4 100644 --- a/docs/guides/governance/contributions/pull-requests/index.html +++ b/docs/guides/governance/contributions/pull-requests/index.html @@ -11,13 +11,13 @@ - +

Pull Requests

A template to standardize pull-requests.

pr-screenshot-example

Example pull request template in action rendering

Introduction

Background: Pull requests help manage contributions to projects, especially on platforms like GitHub. By using a standardized pull request template, projects can streamline the contribution process, providing clarity and consistency for both contributors and maintainers. This guide will help you implement a GitHub Pull Request Template to improve how contributions are made to your project.

Use Cases:

  • Standardizing the format of pull requests for clarity and efficiency.
  • Providing guidelines to contributors for submitting well-documented pull requests.

Prerequisites

  • Access to a GitHub repository where you can add files.
  • Basic understanding of GitHub's file structure and Markdown formatting.

Quick Start

⬇️ Pull Request Template (see example)

Our recommended pull request template for projects.

📔 GitHub Pull Request Template Documentation

Recommendations from GitHub.com on how-to facilitate the use of pull request templates.


Step-by-Step Guide

  1. Team Discussion: Discuss the benefits of a pull request template with your team. Gain consensus on adopting this approach for consistency in contributions. Below is an explanation of our recommended Pull Request Template file's fields. Adjust as necessary.

    • Purpose: To clearly state the intention behind the pull request. This helps reviewers understand the context and significance of your changes.
    • Proposed Changes:
      • [ADD] for new features or content the contributor introduced.
      • [CHANGE] for modifications to existing features or code.
      • [REMOVE] for removal of features or code.
      • [FIX] for bug fixes the contributor implemented.
    • Issues: To link any related issues your PR addresses. This creates a traceable connection between the issue and the solution provided.
    • Testing: To document how the contributor tested the changes, including links to test results or noting the operating systems on which the tests were performed. This assures reviewers of the reliability and effectiveness of changes.
  2. Create Template Directory:

    • In your GitHub repository, create a .github/ folder to hold community health files.
  3. Add Pull Request Template:

    • Copy the Pull Request Template into .github/PULL_REQUEST_TEMPLATE.md.
    • Commit and push this file to the main branch of your repository.
  4. Usage:

    • Once set up, this template will automatically appear in the pull request description box for contributors to fill out.

Frequently Asked Questions (FAQ)

  • Q: How does a pull request template improve contributions?
  • A: It provides a structured format for contributors, ensuring all necessary information is included, which facilitates better review and collaboration.

Credits

Authorship:


Feedback and Contributions

We welcome feedback and contributions to enhance this guide. For contributing, please see our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/governance/governance-model/index.html b/docs/guides/governance/governance-model/index.html index 9810f764..2d6a4a5e 100644 --- a/docs/guides/governance/governance-model/index.html +++ b/docs/guides/governance/governance-model/index.html @@ -11,13 +11,13 @@ - +

Governance Model

Establish a project guide for effective open source governance.

governance-screenshot-example

Example GOVERNANCE.md template rendering

Introduction

Background: A governance model is essential for government-sponsored open source projects, particularly for medium and large-sized teams. It helps in liberal acceptance of public contributions while retaining decision-making authority with funding sources. This guide provides a template to integrate a generalized governance model into your project, promoting clear operational structure and community engagement.

Use Cases:

  • Structuring decision-making processes in open source projects.
  • Balancing open contribution with authoritative project direction.
  • Establishing clear roles and responsibilities within the project team.

Prerequisites

  • Understanding of open source project management.
  • Familiarity with Markdown for editing GitHub documentation.

Quick Start

⬇️ Governance Model Template (Small Teams)

Our recommended governance model for small teams of 1-3 active members.

⬇️ Governance Model Template (Medium Teams)

Our recommended governance model for medium teams of 3-10 active members.

⬇️ Governance Model Template (Large Teams)

Our recommended governance model for large teams of 10+ active members.


Step-by-Step Guide

  1. Team Consultation: Collaborate with your team and stakeholders to discuss adopting the governance model template. You'll want to consider topics like:
    • Decide on how big of a team you currently estimate wanting to be
    • Roles (and how contributors can be promoted between roles)
    • Committees (the groups that guide your project - if any)
  2. Choose the Right Template:
  3. Customize the Template:
    • Place the template in a GOVERNANCE.md file within the root folder of your repository.
    • Modify and personalize the template, replacing [INSERT ...] text with specifics for your project.
  4. Implement the Governance Model:
    • Commit the GOVERNANCE.md file to the main branch.
    • Link to the governance document in your README.md file under the Contributing section.

Frequently Asked Questions (FAQ)

  • Q: What is the importance of a governance model in open source projects?
  • A: It provides a framework for decision-making, contribution processing, and project management, ensuring transparent and structured project operations.
  • Q: What is the role of the Technical Steering Committee (TSC) in this governance model?
  • A: The TSC handles technical decisions, governance processes, and maintains collaborator lists, playing a key role in guiding the project's technical aspects.
  • Q: How are conflicts resolved in this governance model?
  • A: The model advocates for consensus-seeking; however, if consensus cannot be reached, the Product Manager has final authority, ensuring decision-making progresses.
  • Q: Can the governance structure be modified as the project evolves?
  • A: Yes, the model is flexible and can adapt to changing project needs, subject to agreement by the Project Management Committee (PMC) and stakeholders.
  • Q: Should I choose the right GOVERNANCE template for my current team size or intended team size?
  • A: Governance models should evolve slowly - therefore, choose the governance model that is right for your aspirational team size and modify if needed in the future.

Credits

Authorship:

Acknowledgements:

This template was developed by evaluating best GOVERNANCE.md practices in the following organizations:


Feedback and Contributions

We invite feedback and contributions to refine this guide. Visit our contribution guidelines.

- + \ No newline at end of file diff --git a/docs/guides/search/index.html b/docs/guides/search/index.html index 1bcafccc..b43e88d3 100644 --- a/docs/guides/search/index.html +++ b/docs/guides/search/index.html @@ -11,13 +11,13 @@ - + - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/application-starter-kits/python-starter-kit/index.html b/docs/guides/software-lifecycle/application-starter-kits/python-starter-kit/index.html index c8e7d333..fc0ca9c9 100644 --- a/docs/guides/software-lifecycle/application-starter-kits/python-starter-kit/index.html +++ b/docs/guides/software-lifecycle/application-starter-kits/python-starter-kit/index.html @@ -11,7 +11,7 @@ - + @@ -21,7 +21,7 @@ Documentation requiring updates are marked with the keyword INSERT.

  • Apply Project Settings:

    • Update pyproject.toml to update build system dependencies
    • Update setup.cfg to specify build system configurations
      • Edit the metadata keywords to set appropriate keyword values to apply to your project.
        1. Set name to match your unique module name.
        2. Modify console_scripts to point at your entry-point:
          1. If your module launches from a command-line, define an executable inside your module: executable-name = my_package.module:function.
          2. Set the value as empty if there is no entry-point.
        3. Update URLs for your project.
          1. Replace NASA-AMMOS/slim-starterkit-python with the project/repo_name for your cloned project (or refactoring on an existing repo).
        4. Update author, author_email, description and keywords to reflect your project details
  • Build Locally: The application will build, install and deploy from a local command line when all configurations are properly set.

    • Install local tooling and requirements
    • Clean and build and clean again after module builds successfully

      Information
      To validate deployment, we publish the module to the Test PyPi sandbox by default. To publish on the official PyPi, a minor configuration change is required. All previous steps must be complete and the application must build successfully.

  • Build on GitHub: A release kicks off a build and release process in GitHub Actions.


  • Additional Files

    This starter kit produces several deliverables deployed for distribution:


    Frequently Asked Questions (FAQ)

    • Q: What tooling is necessary to use the starter kit?

    • A: Python 3 version 3.9 or greater must be installed with a working package manager (pip). As well, you'll need the latest versions of pip, build, setuptools, twine and wheel.

      python3 --version  # must report Python >=3.9.x
      pip3 install --upgrade pip
      pip3 install --upgrade build setuptools setuptools_scm twine wheel
    • Q: How do I choose a unique module name for PyPi?

    • A: Research existing modules on PyPi and select a name that is not already in use. Consider using a creative and/or descriptive name relevant to your project.


    Credits

    Authorship:

    Acknowledgements:

    • Inspiration and practices from modern Python tooling and GitHub CI/CD workflows.

    Feedback and Contributions

    We welcome feedback and contributions to improve this guide. Please see our contribution guidelines.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-delivery/index.html b/docs/guides/software-lifecycle/continuous-delivery/index.html index c1975ef4..7264bd23 100644 --- a/docs/guides/software-lifecycle/continuous-delivery/index.html +++ b/docs/guides/software-lifecycle/continuous-delivery/index.html @@ -11,13 +11,13 @@ - +

    Continuous Delivery

    A streamlined guide to setting up a robust continuous delivery pipeline, automating releases, and maintaining best practices.

    Introduction

    Background

    Continuous Delivery (CD) is the practice of automatically preparing code changes for production release, extending Continuous Integration (CI) to ensure that every validated change is always production-ready. This guide presents a simplified, practical approach to implementing CD through standardized repository selections, naming conventions, and automation.

    Approach

    Adopt a clear, four-step plan to implement Continuous Delivery effectively:

    1. Choose repositories.
    2. Adopt standardized naming conventions.
    3. Automate publishing.
    4. Maintain the delivery pipeline.

    Key Use Cases

    • Auto-publishing built artifacts to package managers.
    • Standardizing naming conventions across repositories.
    • Versioning releases using semantic versioning.
    • Distributing test data automatically.
    • Automating container image publication.
    • Enabling infrastructure-as-code deployment.

    Quick Start

    The most important step in setting up continuous delivery is choosing the right repositories and implementing proper naming conventions.

    Key Concepts to Get Started:

    ⬇️ Choose a Package Repository based on your artifact type:

    • PyPI for Python packages
    • Maven Central for Java
    • NPM Registry for NodeJS
    • ECR (Amazon Elastic Container Registry)/DockerHub for Containers

    📝 Implement Standardized Naming Conventions:

    • nasa-[project-org]-[module-name] for Python
    • gov.nasa.[project-org].[module-name] for Java
    • @nasa-[project-org]/[module-name] for NodeJS

    🚀 Set up Automated Publishing using GitHub Actions

    Step-by-Step Guide

    1. Select Package Repositories

    Choose appropriate repositories based on your artifact type:

    1.1 Code Packages

    Python Packages
    • Repository: PyPI
    • Size Limit: 60MB
    • Cost: Free
    • Best For: Python libraries and tools
    • Setup Steps:
      1. Create account on PyPI
      2. Set up project with setup.py or pyproject.toml
      3. Configure automated publishing
    Java Packages
    • Repository: Maven Central
    • Size Limit: No specific limit
    • Cost: Free
    • Best For: Java libraries and frameworks
    • Setup Steps:
      1. Create Sonatype account
      2. Configure Maven settings
      3. Set up GPG signing
    NodeJS Packages
    • Repository: NPM Registry
    • Size Limit: No specific limit
    • Cost: Free
    • Best For: JavaScript/TypeScript packages
    • Setup Steps:
      1. Create NPM account
      2. Configure package.json
      3. Set up automated publishing

    1.2 Container Images

    Public Containers
    • Repository: GitHub Packages/GitLab Registry
    • Best For: Open source projects
    • Limitations: Higher latency for runtime
    Private Containers
    • Repository: Amazon ECR
    • Best For: Production deployments
    • Features: Low-latency pulls, private repos

    1.3 Test Data

    Small Datasets (<2GB)
    • Repository: GitHub/GitLab Releases
    • Naming: [project-org]-[project-module]-test-dataset
    • Best For: Unit test data, small samples
    Medium Datasets (2GB-100GB)
    • Repository: Amazon S3
    • Features: Pre-signed URLs, bandwidth control
    • Best For: Integration test data
    Large Datasets (>100GB)
    • Repository: EOSDIS DAAC (Earth data) or PDS (Planetary data)
    • Best For: Mission data, large-scale testing

    2. Implement Naming Conventions

    2.1 Package Naming

    Follow standard naming conventions for each repository type:

    2.2 Version Naming

    Use semantic versioning (MAJOR.MINOR.PATCH):

    • MAJOR: Breaking changes
    • MINOR: New features, backward compatible
    • PATCH: Bug fixes

    3. Automate Publishing

    3.1 GitHub Actions Workflow

    name: Publish Package

    on:
    release:
    types: [published]

    jobs:
    publish:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - name: Set up environment
    uses: actions/setup-python@v3
    with:
    python-version: '3.x'
    - name: Build and publish
    env:
    TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
    TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
    run: |
    python -m pip install build twine
    python -m build
    python -m twine upload dist/*

    3.2 Automated Testing Integration

    4. Maintain Delivery Pipeline

    Regular maintenance tasks:

    1. Update repository credentials
    2. Monitor publishing success rates
    3. Verify artifact integrity
    4. Review and update workflows
    5. Clean up old artifacts

    5. GitHub Actions Workflow Example for PyPI Project Continuous Delivery

    Create a .github/workflows/pypi-cd.yml file in your GitHub repository with the following content:

    name: Continuous Delivery for PyPI Project

    on:
    push:
    branches:
    - main # Trigger on push to the 'main' branch
    tags:
    - 'v*.*.*' # Trigger on tags matching semantic versioning (v1.0.0)

    jobs:
    # Job to set up the environment, install dependencies, and publish to PyPI
    publish-to-pypi:
    runs-on: ubuntu-latest

    steps:
    - name: Checkout repository
    uses: actions/checkout@v3

    - name: Set up Python
    uses: actions/setup-python@v3
    with:
    python-version: '3.x' # Use a specific Python version, e.g., '3.8', '3.9', etc.

    - name: Install dependencies
    run: |
    python -m pip install --upgrade pip
    pip install build twine # Required for building and publishing to PyPI

    - name: Build the package
    run: |
    python -m build # This creates the distribution files under the 'dist' directory

    - name: Publish package to PyPI
    env:
    TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} # Store PyPI credentials as GitHub secrets
    TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
    run: |
    python -m twine upload dist/* # Uploads the package to PyPI

    Frequently Asked Questions (FAQ)

    Q: How do I handle dependencies between packages?

    A: Use semantic versioning and dependency ranges to manage package relationships.

    Q: What about handling sensitive data in artifacts?

    A: Use private repositories and encrypted secrets in CI/CD pipelines.

    Q: How often should artifacts be published?

    A: Publish on every tagged release for stable versions, and optionally for development versions.

    Q: How to manage large binary artifacts?

    A: Use specialized repositories like Amazon S3 for large artifacts and reference them in package metadata.

    Credits

    Authorship:

    Acknowledgements:

    • Thanks to the SLIM team for providing guidance

    Feedback and Contributions

    We welcome feedback and contributions to help improve and grow this page. Please see our contribution guidelines.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-integration/continuous-integration-frameworks/index.html b/docs/guides/software-lifecycle/continuous-integration/continuous-integration-frameworks/index.html index fcb10c95..ad277676 100644 --- a/docs/guides/software-lifecycle/continuous-integration/continuous-integration-frameworks/index.html +++ b/docs/guides/software-lifecycle/continuous-integration/continuous-integration-frameworks/index.html @@ -11,13 +11,13 @@ - +

    CI Tools and Frameworks

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-integration/index.html b/docs/guides/software-lifecycle/continuous-integration/index.html index 4da36778..5222daad 100644 --- a/docs/guides/software-lifecycle/continuous-integration/index.html +++ b/docs/guides/software-lifecycle/continuous-integration/index.html @@ -11,13 +11,13 @@ - +

    Continuous Integration

    A guide for implementing continuous integration in software projects.

    Introduction

    Background: Continuous Integration (CI) is a critical practice in modern software development. This guide introduces a comprehensive approach to CI from analysis and testing to orchestration and release management. We focus on using a variety of tools like Git hooks, GitHub Actions, Jenkins and more to create a robust CI pipeline. By following this guide and combining these tools, developers can automate their build and testing processes, ensuring software is always ready for deployment.

    Use Cases:

    • Automating software integration, build and testing.
    • Streamlining software release and deployment.
    • Ensuring software quality and reliability through repeatable engineering processes.

    Prerequisites

    • Basic knowledge of software development and version control systems.
    • Familiarity with CI/CD concepts and practices.

    Quick Start

    📔 CI Tools and Frameworks

    Click the link above to explore various tools and systems for setting up and optimizing your CI pipeline.

    📔 CI Reference Architectures

    Click the link above to explore the overall sample architecture for a continuous integration system.


    Step-by-Step Guide

    1. Explore CI Tools and Frameworks:

      • Start with the CI Tools and Frameworks guide to explore a range of task-focused CI tools.
      • Review different categories, such as Analysis and Testing, Credentialing, and Execution and Reporting Tests.
      • Based on your use case, select appropriate tools, e.g. Git hooks, GitHub Actions, Jenkins, Maven plugins or SetupTools, that are designed for the software and technologies used in your product.
    2. Understand the Reference Architecture:

      • Dive into the CI Reference Architectures for an overview of the CI process and its components.
      • Understand the core concepts of the CI pipeline, which involves Developer systems, Continuous Integration services, Continuous Deployment platforms and other touchpoints.
      • Learn about the discrete process steps in a CI pipeline, such as Compile, Test, Package, Publish and Deploy (i.e. release or deliver), and how they contribute to building and releasing software.
    3. Combine Reference Architecture with Selected Tools:

      • With the understanding of CI tools from Step 1, and the architectural insights from Step 2, begin mapping tools to specific roles in your CI pipeline.
      • For example:
        • Use Git hooks for code check-ins and GitHub Actions for automated build and testing processes.
        • Utilize Jenkins or GitHub Actions for more complex workflows, like orchestrating builds across different environments or managing deployment strategies.
        • Implement credentialing tools like Jenkins Credentials Binding Plugin or OAuth for secure access to resources.
        • Ensure testing is thorough by integrating language-specific plugins or frameworks, such as Maven for Java or PyTest for Python, into your build process.
        • Use orchestration tools like Ansible for deployment, aligning them with your cloud infrastructure managed by Terraform or Kubernetes.
        • Package your application using Docker or appropriate tools, ensuring a streamlined process from development to deployment.
    • Update a copy of the reference architectural diagram with your selected tools.

    Frequently Asked Questions (FAQ)

    • Q: How do I choose the right CI tools for my project?

    • A: Consider your project’s language, complexity and the specific needs of your deployment environment. Research and compare tools to find the best fit for your workflow.

    • Q: What is the difference between Continuous Integration, Continuous Deployment and Continuous Delivery?

    • A: These related concepts apply to different target stages of a comprehensive CI/CD process, but their definitions are often conflated. Each one of these stages determines the endpoint as builds progress in a given automation process.

      • Continuous Integration (code compiles) tests whether code merged from different developer systems can be compiled together on a control system, e.g. the CI server.
      • Continuous Deployment (packages published) is the publishing of successfully compiled and packaged binaries to an artifact repository and/or local test servers.
      • Continuous Delivery (packages released) extends deployment by pushing packaged binaries immediately to production services upon successful build, test and publishing.

    Credits

    Authorship:

    Acknowledgements:

    • This guide was inspired by the comprehensive tooling options available in the CI/CD ecosystem.

    Feedback and Contributions

    Your feedback and contributions are vital to the continuous improvement of this guide. Please see our contribution guidelines for more information.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-integration/reference-architecture/index.html b/docs/guides/software-lifecycle/continuous-integration/reference-architecture/index.html index 9df6c3c1..a7d7f4df 100644 --- a/docs/guides/software-lifecycle/continuous-integration/reference-architecture/index.html +++ b/docs/guides/software-lifecycle/continuous-integration/reference-architecture/index.html @@ -11,7 +11,7 @@ - + @@ -101,7 +101,7 @@ mechanism to produce reliable software that is transparent and self-validating. This lends itself to tested, reproducible software deliveries with the crisp reliability and speed of automated systems.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-testing/TESTING-example/index.html b/docs/guides/software-lifecycle/continuous-testing/TESTING-example/index.html index 2e332d07..26e5af39 100644 --- a/docs/guides/software-lifecycle/continuous-testing/TESTING-example/index.html +++ b/docs/guides/software-lifecycle/continuous-testing/TESTING-example/index.html @@ -11,13 +11,13 @@ - +

    Jupiter3D Testing

    Introduction

    This document provides an overview of the testing architecture for Jupiter3D. It encompasses continuous testing concepts such as testing across the software development lifecycle as well as automated execution of tests through automation.


    Testing Categories

    The below list of test categories are included in our testing setup. Further details are provided below.

    • Static Code Analysis
    • Unit Tests
    • Security Tests
    • Build Tests
    • Acceptance Tests
    • Integration Tests
    • Performance Tests
    • Usability Tests

    Unit Tests

    Main Tests

    • Location: ./tests/test_main.py
    • Purpose: To test our main script's functions and methods.
    • Running Tests:
      • Manually:
        1. Navigate to the project root directory in the command line.
        2. Execute pytest ./tests/test_main.py.
        3. View Results: Results will appear in the command-line output or can be formatted into a report using the pytest-html plugin.
      • Automatically:
        • Frequency:
          • Triggered by code changes and commits to the src/my_package/main.py file on GitHub.
          • Runs during nightly builds with other unit tests.
        • Results Location: GitHub Actions Unit Test Results
    • Contributing:
      • Framework Used: PyTest
      • Tips:
        • Test every non-trivial function or method in your code
        • Test conditions including malformed arguments and null conditions

    Models

    • Location: ./tests/test_model_*.py
    • Purpose: To test our 3D model rendering code for integrity and functionality
    • Run Tests:
      • Manually:
        1. Navigate to the project root directory in the command line.
        2. Execute pytest ./tests/test_model_*.py.
        3. View Results: Results will appear in the command-line output or can be formatted into a report using the pytest-html plugin.
      • Automatically:
        • Frequency:
          • Triggered by code changes and commits to the src/my_package/test_model_*.py file on GitHub.
          • Runs during nightly builds with other unit tests.
        • Location: GitHub Actions Unit Test Results
      • Contributing:
        • Framework: PyTest
        • Tips:
          • Test each model for edge cases like anti-meridian lines or poles

    Security Tests

    Dependabot

    • Purpose: Ensure our software dependencies are being scanned for vulnerabilities using Dependabot
    • Running Tests:
      • Automatically:
        • Frequency: Daily
        • Results Location: Security tab on repository's GitHub website

    Integration Tests

    Web App API

    • Location: [./tests/integration/web]
    • Purpose: Ensure Web UI software interacts smoothly with other software.
    • Running Tests:
      • Manually:
        1. Install and configure Selenium WebDriver for your target browsers.
        2. Run python ./tests/integration/web/test_suite.py
        3. Review the test execution logs and screenshots captured during the test run.
      • Automatically:
    • Contributing:
      • Framework Used: Selenium
      • Tips:
        • Test the interaction between software components and external APIs

    Performance Tests

    Chaos Testing

    • Location: ./tests/performance/chaos
    • Purpose: Ensure the software is robustly designed to scale and handle expected failures.
    • Running Tests:
      • Manually:
        1. Navigate to ./tests/performance/chaos.
        2. Execute the relevant test scripts for stress / chaos testing.
        3. View results in the output logs or generated reports.
      • Automatically:
        • Frequency:
          • Triggered by significant changes.
          • Quarterly stress tests.
        • Results Location: test deployment machine
    • Contributing:
      • Framework Used: Chaos Monkey
      • Tips:
        • Consider testing both typical and peak usage scenarios.
        • Ensure that performance tests represent real-world conditions as closely as possible.
        • Validate resource utilization thresholds to identify bottlenecks proactively.

    User Interface (UI) Tests

    UI User Experience

    • Location: ./tests/ui
    • Purpose: Ensure that the software meets users' needs and expectations through robust UI design.
    • Running Tests:
      • Manually:
        1. Navigate to ./tests/ui.
        2. Execute the relevant UI test scripts.
        3. View results in the output logs or generated UI testing reports.
      • Automatically:
    • Contributing:
    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-testing/index.html b/docs/guides/software-lifecycle/continuous-testing/index.html index 081655a5..c1916196 100644 --- a/docs/guides/software-lifecycle/continuous-testing/index.html +++ b/docs/guides/software-lifecycle/continuous-testing/index.html @@ -11,7 +11,7 @@ - + @@ -23,7 +23,7 @@ "Generate unit tests for a function that involves string manipulation, including tests for string concatenation, slicing, and length calculations."
  • List/Array Operations: "Create tests for functions that operate on lists/arrays, covering scenarios like element addition, removal, and list comprehensions."
  • Exception Handling: "Generate unit tests to ensure proper exception handling in functions that may encounter errors. Include tests for both expected and unexpected exceptions."
  • Example Generated Unit Test:
  • % ollama run codellama "Generate unit tests for a function/method that performs basic arithmetic operations (addition, subtraction, multiplication, division)"

    import unittest

    class TestArithmeticOperations(unittest.TestCase):
        def test_addition(self):
            result = add(2, 3)
            self.assertEqual(result, 5)

        def test_subtraction(self):
            result = subtract(5, 3)
            self.assertEqual(result, 2)

        def test_multiplication(self):
            result = multiply(4, 6)
            self.assertEqual(result, 24)

        def test_division(self):
            result = divide(10, 5)
            self.assertEqual(result, 2)

    if __name__ == "__main__":
        unittest.main()

    2.2 Example: Writing System Tests using Downloadable, Open Source Code Generation Models and Behavior-Driven Development Frameworks

    System-level tests that involve interaction with external software or files can be complicated to test. Please refer to our System Testing section within our Testing Frameworks Guide for recommended system testing tools that leverage behavior-driven development (BDD) philosophies. One such example, Robot Framework, is demoed below in collaboration with Large-Language Models. Specifically, using LLMs to auto-generate Robot Framework pseudocode can help streamline the process of creating integration test cases. Complicated system testing scenarios can be simplified and written automatically using LLMs. Here's an example:

    1. Generating Robot Framework Pseudocode with LLM:

      • Use Codellama to generate test case pseudocode in Robot Framework syntax.
        % ollama run codellama "Generate a Robot Framework script to perform MFA (Multi-Factor Authentication) login. The script should navigate to the login page, fill in the username and password fields, generate a TOTP code using the provided secret, enter the TOTP code, click the 'Sign in' button, and verify that the login was successful by checking the welcome message."
        *** Settings ***
        Documentation Example test suite
        Library SeleniumLibrary
        *** Test Cases ***
        Valid Login
        Open Browser https://dummy-website.com chrome
        Input Text username_field valid_username
        Input Text password_field valid_password
        Click Button login_button
        Page Should Contain Welcome, User!

        Invalid Login
        Open Browser https://dummy-website.com chrome
        Input Text username_field invalid_username
        Input Text password_field invalid_password
        Click Button login_button
        Page Should Contain Invalid credentials
    2. Direct Revision and Enhancement:

      • Revise the Robot Framework pseudocode as needed:
        • Add additional steps.
        • Include assertions for edge cases.
        • Incorporate custom keywords or libraries.
    3. Test Execution:

      • Run the tests locally or integrate them into your CI pipeline.

    By combining LLM's natural language capabilities with Robot Framework's structured format, you can efficiently create and adapt test cases.

    Here is an example to generate a Robot Framework script for MFA (Multi-Factor Authentication) login with the following steps:

    1. Navigate to the login page.
    2. Fill in the username and password.
    3. Generate a TOTP (Time-Based One-Time Password).
    4. Fill in the TOTP code field.
    5. Click the "Sign in" button.
    6. Verify that the login was successful.

    Here's a simple prompt to generate the script:

    "Generate a Robot Framework script to perform MFA (Multi-Factor Authentication) login. The script should navigate to the login page, fill in the username and password fields, generate a TOTP code using the provided secret, enter the TOTP code, click the 'Sign in' button, and verify that the login was successful by checking the welcome message."

    Here's the LLM output Robot Framework script:

    *** Settings ***
    Library Browser
    Library totp.py
    Suite Setup New Browser browser=${BROWSER} headless=${HEADLESS}
    Test Setup New Context
    Test Teardown Close Context
    Suite Teardown Close Browser

    *** Variables ***
    ${BROWSER} chromium
    ${HEADLESS} False

    *** Test Cases ***
    MFA Login Test
    Open Browser https://example.com/login ${BROWSER}
    Input Text id=username your_username
    Input Text id=password your_password
    ${totp} Generate TOTP your_totp_secret
    Input Text id=totpcode ${totp}
    Click Button id=signin_button
    Wait Until Page Contains Element xpath=//h1[text()='Welcome!']

    Here's the test output:

    % robot --pythonpath . tests
    ==============================================================================
    Tests
    ==============================================================================
    Tests.Mfa Login
    ==============================================================================
    Login with MFA | PASS |
    ------------------------------------------------------------------------------
    Tests.Mfa Login | PASS |
    1 test, 1 passed, 0 failed
    ==============================================================================
    Tests | PASS |
    1 test, 1 passed, 0 failed
    ==============================================================================
    Output: MFA_Login/output.xml
    Log: MFA_Login/log.html
    Report: MFA_Login/report.html

    The quality of the generated test code can be compared with the example provided by Robot Framework here. The website name that you are testing needs to be updated, but other than that, it was perfect!

    3. Automate Your Tests

    Our recommendation is to automate as many of your tests as possible using pre-commit, a framework that manages and maintains multi-language pre-commit hooks that can be used on the client side as well as the server (VCS) side.

    3.1 Static Test Automation

    We recommend setting up a static test using .pre-commit-config.yaml.

    3.2 Component Test Automation

    Component tests refer to tests for your immediate code base, code file, or something that does not require system-level interaction. Please consult our Testing Frameworks guide for a choice of testing tools we recommend. Once selected, we recommend automating the execution of your tests in both of the following ways:

    1. Execute tests locally on your developers' machines upon local Git commits
    2. Execute tests upon Git pushes to given Git branches on your version control system (VCS) - hosted on GitHub.com or alternate

    This idea is represented in the following diagram:

    To accomplish this, we recommend using pre-commit. Here's how to set it up:

    Developers' Machines
    • Step 1: Install pre-commit on your local machine. If you are using Python, you can install it via pip:

      pip install pre-commit

    • Step 2: Create a .pre-commit-config.yaml file at the root of your repository with the configuration for your Python component tests using PyTest. Here's an example template you can start with:

      Python

      repos:
      - repo: local
      hooks:
      - id: pytest
      name: PyTest
      entry: pytest
      language: system
      files: '\.py$'
      stages: [commit]

      This configuration assumes that you have PyTest installed and set up for your project. The files regex \.py$ ensures that the pre-commit hook only runs on Python files.

      HCL (HashiCorp Configuration Language)

      repos:
      - repo: local
      hooks:
      - id: terraform_fmt
      name: Terraform Format
      entry: terraform fmt -check
      language: system
      files: '\.tf$'
      stages: [commit]

      This configuration uses Terraform's built-in fmt command to format Terraform configuration files. While not a direct component test, it's a common practice to ensure code quality and consistency in HCL-based projects.

      JavaScript

      repos:
      - repo: local
      hooks:
      - id: jest
      name: Jest
      entry: npm run test
      language: system
      files: '\.(js|jsx)$'
      stages: [commit]

      This setup assumes you are using Jest for testing your JavaScript projects. The npm run test command should be configured in your package.json to execute Jest tests. If using TypeScript, replace the line files: '\.(js|jsx)$' with files: '\.(ts|tsx)$'.

      Jupyter Notebook

      repos:
      - repo: local
      hooks:
      - id: nbtest
      name: Notebook Test
      entry: jupyter nbconvert --to notebook --execute --inplace
      language: system
      files: '\.ipynb$'
      stages: [commit]

      This configuration uses Jupyter's nbconvert tool to execute notebooks as a form of testing. It's a basic approach to running tests in Jupyter Notebooks and might need additional tooling or scripts for more comprehensive testing scenarios.

    • Step 3: Install the pre-commit hook into your Git repository:

      pre-commit install

      Now, every time you commit changes, your component tests will run automatically on the specified (pattern-matching) files you've staged for commit.

    Version Control System

    For automated execution of component tests upon Git pushes using a VCS, we recommend using GitHub Actions or a configuration for Jenkins:

    To invoke a .pre-commit-config.yaml configuration from GitHub Actions or Jenkins for automated execution of unit tests upon Git pushes, follow these detailed directions:

    GitHub Actions

    To execute the pre-commit hooks defined in your .pre-commit-config.yaml as part of a GitHub Actions workflow, you will create a workflow file in your repository that triggers on push events. Here’s how to set it up:

    1. Create a Workflow File: Navigate to the .github/workflows directory in your repository. If it doesn't exist, create it.

    2. Define the Workflow: Create a new file named pre-commit-action.yml (or another name of your choosing) in the workflows directory. Add the following content to this file:

      name: Pre-commit Hooks

      on: [push]

      jobs:
      run-hooks:
      runs-on: ubuntu-latest
      steps:
      - uses: actions/checkout@v2
      - name: Set up Python
      uses: actions/setup-python@v2
      with:
      python-version: '3.8'
      - name: Install pre-commit
      run: pip install pre-commit
      - name: Run pre-commit hooks
      run: pre-commit run --all-files

    This workflow checks out the code, sets up Python, installs pre-commit, and then runs all the pre-commit hooks defined in .pre-commit-config.yaml against all files in the repository. Adjust the python-version and setup steps according to your project's needs.

    Commit and Push: Commit the workflow file to your repository and push it to GitHub. The workflow will automatically trigger on the next push to your repository.

    Jenkins

    To run the pre-commit hooks as part of a Jenkins build, you'll need to configure a Jenkins job that checks out your repository and executes the pre-commit hooks. Here's how to do it:

    1. Install Pre-commit on Jenkins: Ensure that pre-commit and any language-specific runtime (like Python, Node.js) are installed on your Jenkins server or within the build environment that will run your job.

    2. Create a New Jenkins Job: In Jenkins, create a new job by selecting "New Item," then choose "Freestyle project," and give it a name.

    3. Configure Source Code Management: Under the "Source Code Management" tab, select "Git" and fill in the repository URL and credentials if necessary.

    4. Add Build Step to Execute Shell: In the "Build" section, add a build step that executes shell commands. Add the following commands:

      #!/bin/bash
      # Install pre-commit if not already installed; optional based on your setup
      pip install pre-commit

      # Run pre-commit hooks
      pre-commit run --all-files
    5. Save and Run the Job: After configuring the job, save it and run it manually to verify that the pre-commit hooks are executed as expected.

    6. Triggering the Job: You can configure the job to be triggered on each push to your repository by using Jenkins webhooks or polling SCM, depending on your preference and setup.

    3.3 System Test Automation

    System tests refer to tests that require interaction between multiple components. Not every project has this complexity. To aid in the automation of system tests, we suggest doing the following:

    1. Have a schedule for running system tests (e.g., nightly, weekly)
    2. Ensure software is built and published to repositories:
      • Stand-alone components of your software should be independently released, built, or packaged to be published on public repositories
      • (Optional) a final, single build of an integrated software consisting of multiple components is built, packaged, and published to a public repository
    3. Pull built or packaged artifacts from repositories and deploy software release (components or single package) to a virtualized environment
    4. Test the deployed release against a set of specified system tests

    The diagram below illustrates this concept (Continuous Testing parts are highlighted in blue):

    Types of System Tests

    You should outline the types of system tests you plan to implement in your TESTING.md file. We suggest the following types of tests to include:

    • Testing for integration
      • Interaction between software components
      • Interaction with external services, provided files, exchange of messages, etc.
    • Testing for security
    • Testing for performance and load
    • Testing user interfaces for gaps and compliance against policies
    Example: Integration Test Automation

    We recommend the following steps:

    1. For more information about applying integration testing, take a look at the following external guide from Microsoft Engineering Fundamentals.
    2. Follow the process shown in the diagram above for integration test automation.
    3. Integrate your integration tests into a Continuous Integration (CI) pipeline, which will allow for the automatic execution of tests upon code changes.
    Example: Security Test Automation

    To aid in security testing automation, we recommend two steps:

    1. Add security testing to your developers' local coding environment via pre-commit (see the Component Test Automation section above)
    2. Enable existing SLIM security best practices as part of your software development workflow.
    Example: Performance Test Automation

    We recommend the following steps for performance test automation:

    1. Take a look at the following external guide from Microsoft Engineering Fundamentals for more information about applying performance testing.
    2. Refer to the Testing Frameworks page for performance test tools and frameworks.
    3. Integrate performance tests into your Continuous Integration and Continuous Delivery (CI/CD) pipeline to enable regular and automated execution of performance tests as part of the software delivery process.
    Example: User Interface Test Automation

    We recommend the following steps for user interface test automation:

    1. Take a look at the following external guide from Microsoft Engineering Fundamentals for more information about applying user interface testing.
    2. Refer to the Testing Frameworks page for user interface test tools and frameworks.
    3. Set up a CI pipeline to automatically trigger UI test execution upon code changes or at scheduled intervals.

    4. Maintain Your Tests

    Your tests should be updated, at minimum, upon the following events:

    1. Whenever code changes occur (e.g., new features, bug fixes, refactoring), revisit related tests.
    2. Upgrading libraries, frameworks, or testing tools may necessitate adjustments to existing tests.
    3. As your application data evolves, ensure test data remains relevant.
    4. Periodic code coverage analysis identifies underused or obsolete tests.

    This is the list of items to be maintained:

    1. TESTING.md
    2. Component tests
    3. System tests
    4. Test automation

    Frequently Asked Questions (FAQ)

    Q: How does continuous testing address usability and user interface testing?

    A: We acknowledge the importance of usability and UI testing and are actively exploring ways to integrate them seamlessly into our continuous testing model.

    Q: Is it necessary to implement all recommended tools, considering resource constraints?

    A: We understand the challenges, and thus, we're developing a common subset of tools for all projects, prioritizing their importance for more feasible implementation.

    Q: Are certain projects not mature enough for specific tools?

    A: Yes, we recognize project maturity levels vary. We recommend waiting until your project reaches an appropriate stage, especially for tools like integration testing.

    Q: What phases are prioritized in the continuous testing guidelines?

    A: Security, verification and validation, and integration testing are considered essential phases and will be prioritized in our guidelines.

    Q: How does licensing factor into the tool selection process, even for open-source tools?

    A: Licensing is crucial, and we are actively exploring strategies to address licensing concerns, ensuring compliance even with open-source tools.

    Q: Is continuous testing a one-time implementation, or can it be an iterative process?

    A: We emphasize iterative implementation for continuous testing success, understanding that refining the process takes time.


    Credits

    Authorship:

    Acknowledgements:

    • We are grateful to John Engelke and Dillon Dalton for their insightful comments and feedback, which have greatly improved this work.
    • We also appreciate Drew Meyers and Luca Cinquini for providing exemplary best practices for various tests and pre-commit hooks.

    Feedback and Contributions

    We welcome feedback and contributions to help improve and grow this page. Please see our contribution guidelines.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/continuous-testing/testing-frameworks/index.html b/docs/guides/software-lifecycle/continuous-testing/testing-frameworks/index.html index 6739ceb5..34b160c3 100644 --- a/docs/guides/software-lifecycle/continuous-testing/testing-frameworks/index.html +++ b/docs/guides/software-lifecycle/continuous-testing/testing-frameworks/index.html @@ -11,13 +11,13 @@ - +

    Testing Frameworks

    Continuous Testing

    For Code Development

    For Module Testing (xUnit)

    • Java
      • JUnit: A popular Java testing framework for unit and integration testing.
    • JavaScript/TypeScript
      • Jest: A widely used JavaScript testing framework for unit and integration testing.
    • Python
      • PyUnit: The built-in unit testing library for Python.
      • PyTest: A popular Python testing framework for unit and functional testing.
    • C#
      • NUnit: A widely used testing framework for .NET applications.
    • C/C++

    For Mocking, Stubbing, and Simulating Test Services

    • Java
      • Mockito: A Java mocking framework for unit testing.
      • EasyMock: A library for creating mock objects in unit tests.
      • JMock: A framework for mocking Java interfaces.
    • Python
      • PyTest: A popular Python testing framework with mocking support.
      • Nose2: A test discovery and execution framework for Python.
      • Mock: A Python library for mocking objects and behavior.
    • C#
      • Moq: A mocking framework for .NET.
      • NSubstitute: A friendly substitute for .NET mocking libraries.
      • FakeItEasy: An easy-to-use mocking framework for .NET.
    • C/C++
    • HTTP Services
      • WireMock: A tool for mocking HTTP services, useful for simulating APIs and services during testing.

    For Code Analysis

    For Static Analysis

    • Java
      • PMD: A source code analyzer for Java, JavaScript, and more.
      • Checkstyle: A tool for checking Java code against coding standards.
      • Spotbugs: A static analysis tool for Java bytecode.
      • SonarQube: A tool combining multiple analyses in a single pass.
    • JavaScript/TypeScript
      • ESLint: A widely used JavaScript and TypeScript linting tool for identifying and fixing code issues.
      • PMD: A source code analyzer for Java, JavaScript, and more.
      • SonarQube: A comprehensive platform for continuous inspection of code quality and security.
    • Python
      • PyLint: A Python static code analysis tool.
      • Pyflakes: A lightweight Python code checker.
    • C#
      • StyleCop: Enforces style and consistency rules in C# code.
      • Roslynator: A set of code analyzers, refactorings, and code fixes for C#.
      • XunitAnalyzer: Analyzers to improve the quality of xUnit.net tests.
      • SonarAnalyzer: A comprehensive platform for continuous inspection of code quality and security.
    • C/C++

    For Dynamic Analysis

    For Test Coverage
    • Java
    • Python
      • Coverage: A Python code coverage measurement tool.
      • PyTest-cov: A PyTest plugin for coverage reporting.
    • C#
      • AltCover: A .NET code coverage tool.
      • NCover: A code coverage tool for .NET.
    • C/C++
    • JavaScript/TypeScript
      • Istanbul: A code coverage tool for JavaScript and TypeScript.
      • Jest Coverage: Code coverage support in Jest.
    For Complexity Analysis and Runtime Performance

    For Security

    For Web Applications

    For Service Endpoints (APIs)

    For RESTful Endpoints

    • JMeter: An Apache tool for performance testing.
    • Postman: A popular API testing tool.
    • REST Assured: A Java library for RESTful API testing.
    • SOAPUI: A comprehensive API testing tool.

    For RPC/Remote Processing

    For User Interfaces (UIs)

    • Robot Framework: An open-source test automation framework.
    • Selenium: A popular web testing framework.
    • Cypress: An end-to-end testing framework.
    • Playwright: A browser automation library.
    • Serenity BDD: A test automation framework that combines Selenium, JUnit, and BDD practices.
    • Allure: An open-source framework for test report generation with interactive and informative reports.

    For Cross-Browser Testing

    For Security

    • OWASP ZAP: An open-source security testing tool for finding web application vulnerabilities.
    • Burp Suite: A popular security testing tool for web application security assessment.

    For Performance Testing

    For Load Testing

    • JMeter: An Apache tool for performance testing.
    • Gatling: A high-performance load testing tool.
    • Locust: An open-source load testing tool.

    For Acceptance Testing

    • Cucumber: A collaboration and automation platform for Behavior-Driven Development (BDD).
    • Chaos Monkey: A tool for testing system resilience.
    • Robot Framework: An open-source test automation framework.

    For Mobile Testing

    For All Platforms

    • Appium: An open-source mobile automation framework.

    For Android

    For iOS

    For Reporting

    For Unified Analyses (Dynamic and Static)

    • SonarQube: An open-source platform for continuous inspection of code quality.
    • Scrub/Unified: Tools for code analysis and security scanning.

    For Status Reporting

    For Real-time Validation and Stability

    • Jenkins: An automation server for building, testing, and deploying.
    • GitHub Actions: CI/CD workflows powered by GitHub.
    • Docker Container: Containerization technology for managing test environments.

    For AI/ML Testing

    • TensorFlow Test: A testing framework for machine learning models developed using TensorFlow.

    For System Testing

    • Robot Framework: An open-source automation framework that supports both test automation and test process automation.
    • pytest-bdd: A behavior-driven testing framework for Python, built on top of pytest.

    For Test Code Generation

    Downloadable Open-Source Models

    • Ollama: A streamlined tool for running various large language models.
    • codellama: A large language model-based code generation tool.
    • StarCoder: A popular large language model for code generation, particularly suited for multi-language support.
    • Llama3: A language model designed for code generation and other tasks.

    Cloud-Based Models

    ⚠️ Exercise caution when using sensitive data

    • GPT-4: The latest version of Generative Pre-trained Transformers (GPT), a type of deep learning model used for natural language processing and text generation.
    • Claude: One of the leading models with longer context windows than GPT-4, allowing it to maintain coherence and understanding over longer sequences of text.
    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/metrics/index.html b/docs/guides/software-lifecycle/metrics/index.html index 979d9c92..8fbd2adf 100644 --- a/docs/guides/software-lifecycle/metrics/index.html +++ b/docs/guides/software-lifecycle/metrics/index.html @@ -11,14 +11,14 @@ - +

    Metrics

    A Guide for Configuring and Deploying Software Lifecycle Metrics Tracking.

    slim-dashboard Example Metrics Dashboard using Apache DevLake

    Introduction

    Metrics collection is important for project management and software quality assurance. We recommend Apache DevLake for easy tracking and analysis. This guide simplifies its installation and configuration, especially for developers new to metrics collection.

    Use Cases:

    • Collecting and analyzing DORA metrics along with many others for your project.
    • Creating a visual dashboard to view metrics from multiple sources (e.g., GitHub, JIRA) in one place.
    • Streamlining the setup and configuration of Apache DevLake through a single-command setup step.
    • Gaining insight into organizational and project performance for software development and the overall software lifecycle.

    Why We Chose Apache DevLake:

    Our decision to select Apache DevLake was informed by thorough trade study documentation, available here.


    Prerequisites

    • Familiarity with Docker as well as a running instance of it
    • A familiarity with validated software metrics is not required for this tool but it is recommended

    Quick Start

    To quickly deploy DevLake on one of your servers or locally for testing, we've developed a convenient 1-step command. Please ensure Docker is running on your system before executing this command.

    The purpose of this script is to automate the installation process DevLake recommends here. The script does the following:

    • Checks for necessary software: ensures you have Docker and docker-compose installed to run DevLake.
    • Downloads required files: automatically retrieves setup files if they're not already present on your system.
    • Prepares setup files: adjusts file permissions and sets up the necessary environment variables for DevLake.
    • Secures the setup: generates a unique encryption key for data security.
    • Starts DevLake: uses Docker to initialize the DevLake application in the background and guides you to visit a web address to start using DevLake for data analysis and viewing dashboards.

    Run This Command in Your Terminal and Then Move on to the Next Step of the Configuration Guide:

    cd /path/to/your/chosen/deployment/directory
    /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/NASA-AMMOS/slim/main/docs/guides/software-lifecycle/metrics/metrics-starter-kit/install_devlake.sh)"

    To Stop Services:

    1. Navigate to the directory containing your docker-compose.yml file (where you ran the above command).

    2. Run the following command to gracefully stop all containers defined in the docker-compose.yml file:

      docker-compose down

    To Restart Services:

    1. Navigate to the directory containing your docker-compose.yml file.

    2. Run the following command to start containers for services defined in the docker-compose.yml file:

      docker-compose up -d

      The -d flag runs containers in detached mode, allowing them to run in the background.


    Step-by-Step Configuration Guide

    1. Run the Quick Start steps above.
    2. Once you have a working DevLake instance, we recommend going through DevLake's official start guide step-by-step, beginning with the data sources section.
    3. If you're interested in sharing your dashboards with your community but are unable to host a server, you can export your dashboards by following the instructions provided here.

    There are two additional topics we'd like to emphasize. Our recommendations for data sources and metrics to collect.

    We recommend, at a minimum, connecting the following data sources (see the DevLake docs on configuring data sources for further assistance):

    See this list of metrics on the DevLake documentation guide for why certain metrics are important and how to collect them. At a minimum, we recommend that the following metrics be collected for your projects:

    • Change Failure Rate: "The percentage of changes that were made to a code that then resulted in incidents, rollbacks, or any type of production failure."
    • Lead Time for Changes: "The median amount of time for a code change to be deployed into production."

    Frequently Asked Questions (FAQ)

    • Q: How do I customize the DevLake Quick Start script for more functionality?

    • A: If you have already provided DevLake with a data source, you can further configure your dashboard by following this guide. Use simple queries to gather the information you need.

    • Q: How do I export the Grafana dashboard to a PDF?

    • A: You can use this tool.


    Credits

    Authorship:


    Feedback and Contributions

    We value your feedback and welcome contributions to improve this guide. Please see our contribution guidelines.


    Acknowledgements:

    • The NISAR and SWOT missions for their experience deploying and using DevLake.
    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/security/container-vulnerability-scanning/index.html b/docs/guides/software-lifecycle/security/container-vulnerability-scanning/index.html index e61fbef2..0c391b46 100644 --- a/docs/guides/software-lifecycle/security/container-vulnerability-scanning/index.html +++ b/docs/guides/software-lifecycle/security/container-vulnerability-scanning/index.html @@ -11,13 +11,13 @@ - +

    Container Vulnerability Scanning

    A guide to scanning containers and container repositories for security vulnerabilities both manually and automatically.

    banner-image

    Introduction

    Background: To maintain the integrity and security of your containers in production environments, it's essential to monitor dependency vulnerabilities. Third-party software dependencies can harbor security vulnerabilities. This guide focuses on utilizing Grype, an open source vulnerability scanner, to proactively detect vulnerabilities in dependencies defined within container images as well as generally within repositories that use package managers.

    Use Cases:

    • Scanning container images for vulnerabilities during the development phase
    • Ensuring base container images are as vulnerability-free as possible
    • Scanning package-manager defined software dependencies (e.g. NPM, YARN, Maven, etc.) for vulnerabilities during the development phase
    • Automating vulnerability detection in repositories

    Prerequisites

    Software:

    • OCI compliant containers (e.g. Docker, Podman) or other package-manager software dependencies
    • pre-commit framework

    Skills:

    • Basic knowledge of Git hooks and Docker commands
    • Understanding of YAML for pre-commit configuration

    Quick Start

    Run a local scan of your container's repository (folder containing the Dockerfile) using Grype

    grype dir:.

    Run a local scan of a Docker image using Grype

    First, build the Docker image:

    docker build -t my-app:latest .

    Then, scan the built Docker image:

    grype my-app:latest

    ⬇️ Grype Scanning .pre-commit-config.yml

    Download the file above to access the pre-commit configuration file, which includes an example hook for Grype vulnerability scanning. This file should be placed within your local Git repository after installing the pre-commit framework.

    ⬇️ Grype GitHub Action

    GitHub Action for Grype vulnerability scanning.


    Step-by-Step Guide

    Step 1: Scan Locally for Container Vulnerabilities

    1. Ensure Grype is installed on your system. You can install Grype from the official repository.

      grype version
    2. Perform a scan of the local repository for vulnerabilities. The below checks for vulnerabilities via any common package managers that are detected in your repository. See Grype supported sources for more information.

      grype dir:.
    3. To scan a Docker image, first build the Docker image:

      docker build -t my-app:latest .
    4. Then, perform a scan of the built Docker image:

      grype my-app:latest
    5. If you find vulnerabilities, fix them via your package manager.

    Step 2: Setup Automated Local Scanning of Container Vulnerabilities

    ⚠️ NOTE: We recommend installing this pre-commit hook only if you have downloaded grype, already scanned your repository and addressed any vulnerabilities.

    ⚠️ NOTE: The automated scan described below will NOT check for image vulnerabilities, rather, it uses the package dependency capability of Grype to look for third-party dependencies via grype dir:.

    The below steps, once enacted, will ensure that any local git push action is preceded by an automated vulnerability scan. If vulnerabilities at the CRITICAL level are found, the push will be blocked by default.

    1. Install the pre-commit framework via Python:

      pip install pre-commit
    2. Create a .pre-commit-config.yaml file in the root directory of your Git repository with the following content for Grype scanning:

      repos:
      - repo: local
      hooks:
      - id: grype-cve-scan
      name: Grype Vulnerability Scan
      description: Scans for dependency vulnerabilities. Fails if CRITICAL vulnerabilities detected.
      entry: python -c "import os; import subprocess; import sys; os.environ['GRYPE_DB_AUTO_UPDATE'] = 'false'; result=subprocess.run(['grype', 'dir:.', '--fail-on', 'critical'], capture_output=True); print(result.stdout.decode()); print('CRITICAL level vulnerabilities found. To address issues, run scan via `grype dir:.`, then `git add` followed by `git commit` your fix or ignore via `git commit --no-verify`') if result.returncode != 0 else print('No CRITICAL level vulnerabilities found.'); sys.exit(result.returncode)"
      language: system
      verbose: true
      stages: [pre-push]
    3. Initialize pre-commit in your repository with the new configuration:

      pre-commit install
    4. Grype-based vulnerability scanning should run every time a git push is invoked. The push will be blocked if CRITICAL level vulnerabilities are found and will ask the developer to fix them prior to pushing. Otherwise a report will be provided for reference.

    Step 3: Set Up Automated Repository Scanning

    For GitHub users, we recommend:

    • Installing the official Grype GitHub action to set up automated dependency vulnerability scanning. The tool is available at this link.
    • Setting up GitHub's official Dependabot action to also look for vulnerabilities. See our GitHub Security Guide on this.

    Frequently Asked Questions (FAQ)

    Q: What happens if the pre-commit scan finds vulnerabilities?

    A: The hook will prevent you from pushing changes until the vulnerabilities are resolved. The scan is set to alert only for critical vulnerabilities by default to minimize disruption.

    Q: What if I want to skip the pre-commit scan temporarily?

    A: You can bypass the hook by using the --no-verify flag with the git push command, though this is generally not recommended.

    Q: Is it possible to run vulnerability scans without pre-commit hooks?

    A: Yes, you can incorporate scans into your CI/CD pipeline or utilize other repository scanning tools, which can prevent pushing vulnerable code.

    Q: What's the difference between Grype and GitHub's Dependabot? Why do I need both?

    A: Grype relies on free and open software vulnerability databases whereas GitHub's Dependabot may be using proprietary methods. In our testing, we've found some non-overlapping vulnerabilities that are sometimes found in one tool but not the other.


    Credits

    Authorship:

    Acknowledgements:

    • OPERA SDS Project for implementation guidance
    • @ddalton-swe for tool suggestions

    Feedback and Contributions

    We welcome feedback and contributions to enhance this guide further. Please refer to our contribution guidelines.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/security/github-security/index.html b/docs/guides/software-lifecycle/security/github-security/index.html index 403ff1be..19a98278 100644 --- a/docs/guides/software-lifecycle/security/github-security/index.html +++ b/docs/guides/software-lifecycle/security/github-security/index.html @@ -11,13 +11,13 @@ - +

    GitHub Security Best Practices

    Recommendations for enabling GitHub security features for your repositories.

    gh-security-screenshot-example

    Example GitHub security tools rendering

    Introduction

    Background: GitHub offers a suite of security features to help maintainers and developers protect their code and ensure the safety of their repositories. From automatically detecting vulnerabilities in dependencies to scanning for secrets and setting security policies, these tools are essential for any project, especially in today’s security-conscious environment.

    Use Cases:

    • Being alerted over e-mail or GitHub notifications about known vulnerabilities in your dependencies and having pull-requests automatically created to resolve the issues.
    • Being alerted if your dependencies have updated versions available.
    • Being alerted if your commits have potentially harmful secrets or sensitive information within the code - including being blocked from pushing your commits.

    Prerequisites

    • A GitHub repository
    • Familiarity with GitHub’s user interface
    • Admin rights for certain security configuration tasks
    • Team discussion: before diving into any configurations, we recommend engaging with your development team about the importance of GitHub’s security features. Establish a consensus on which ones to prioritize and implement.

    Quick Start

    The fastest way to enable the recommended GitHub Security features is to enable them in bulk for all of your repositories within a given organization. Consult Enabling security features for multiple repositories for details. Organization administrative-level access is required.

    We recommend enabling the below features for all your repositories:

    img

    Specifically:

    • Dependency graphs (select "Enable All")
      • Select "Automatically enable for new private repositories"
    • Dependabot Alerts (select "Enable All")
      • Select "Automatically enable for new repositories"
    • Dependabot Security Updates (select "Enable All")
      • Select "Automatically enable for new repositories"
    • Code Scanning (select "Enable All")
      • Select the default "CodeQL high-precision queries" option

    If you do not have organizational permissions or if you wish to customize security features per repository, see our Step-by-Step guide below for repository-specific guidance.

    Step-by-Step Guide per Repository

    1. Set Up Dependabot:
      • Navigate to your repository and click on the Settings tab.
      • From the left sidebar, select the Code security and analysis menu.
      • Under the "Dependabot" section:
        • We recommend enabling Dependabot alerts to stay informed about insecure dependencies in your project.
        • For added security, we suggest turning on Dependabot security updates to automatically generate pull requests for known vulnerabilities in your dependencies.
        • We also recommend enabling Dependabot version updates if you are using a package manager for your project. This will help you keep your dependencies up-to-date. To configure Dependabot version updates:
          1. Create a .github/dependabot.yml file in your repository.
          2. Specify the package-ecosystem, directory, schedule and branch to update. For example, the below demonstrates a Python dependabot.yml example from the SLIM Python Starter Kit:
            version: 2
            updates:
            - package-ecosystem: 'pip'
            directory: '/' # location of package manifests
            schedule:
            interval: 'daily'
            time: '09:00'
            timezone: 'America/Los_Angeles'
            target-branch: 'main'
            labels:
            - 'dependencies'
      • To view Dependabot alerts and version updates:
        • Head back to the main page of your repository.
        • Click on the Security tab. Here, you can select Dependabot alerts to view security alerts, and you can see version updates in the Pull requests tab labeled with "Dependabot".
    2. Enable Code Scanning:

      • In the Code security and analysis menu from the Settings tab, click the "Set Up" button or enable the following workflows:
        • CodeQL Analysis workflow: a free tool provided by GitHub that scans your code for vulnerabilities across a variety of languages. Simply choose a CodeQL Analysis template (default is acceptable) and follow the instructions.
      • To view Code scanning alerts:
        • Return to the repository main page.
        • Click on the Security tab and select Code scanning alerts.
    3. Enable Secret Scanning:

      • In the Code security and analysis menu from the Settings tab:
        • Click on the Secret scanning enable button.
        • We recommend enabling "Push protection" for blocking commits containing secrets
      • To view Secret scanning alerts:
        • Navigate to the repository main page.
        • Click on the Security tab and select Secret scanning alerts.

    Frequently Asked Questions (FAQ)

    • Q: Can these security features be used outside of GitHub?

      A: This guide specifically focuses on GitHub’s ecosystem. While some tools might have external equivalents, the integrations and configurations here are GitHub-specific.

    • Q: Are these security features available on GitHub Enterprise?

      A: It depends on your institution's particular version of GitHub deployed. You'll have to check your Settings tab to view the features that are provided. GitHub.com offers the most up-to-date feature set and is the version we recommend.

    • Q: If I receive security alerts, what should I do and how soon should I act?

      A: When you receive a security alert, it indicates a potential vulnerability in your repository. First, review the details of the alert to understand the severity and the affected component. Address critical vulnerabilities immediately, as they can pose a significant risk to your project. For less severe alerts, plan to address them in a timely manner. Always keep in mind that the sooner you act on security alerts, the better you can protect your code and users from potential threats.

    Credits

    Authorship:

    Acknowledgements:

    • GitHub for providing the security features and related documentation. See GitHub’s Security Features to access an overview of the suite of security features GitHub provides for repositories.
    • OWASP DevSecOps Guideline for providing a Shift Left strategy to secure all phases of development.

    Feedback and Contributions

    We welcome feedback and contributions to help improve and grow this guide. Please see our contribution guidelines.

    - + \ No newline at end of file diff --git a/docs/guides/software-lifecycle/security/secrets-detection/index.html b/docs/guides/software-lifecycle/security/secrets-detection/index.html index bf3f367c..806873b2 100644 --- a/docs/guides/software-lifecycle/security/secrets-detection/index.html +++ b/docs/guides/software-lifecycle/security/secrets-detection/index.html @@ -11,13 +11,13 @@ - +

    Secrets Detection

    Guide to identify and automatically prevent leaking of sensitive information into your codebase.

    secrets-screenshot-example

    Example secrets scanning rendering

    Introduction

    Background: Sensitive information like API keys, passwords or tokens may be inadvertently committed to your repository. Such slip-ups can pose significant security risks. We recommend not only recurring scans for sensitive information, but proactively preventing sensitive information from getting infused. To support these goals, we recommend a tool called detect-secrets that mitigates these risks. It scans for common sensitive information categories like passwords and other high-entropy values that contain sensitive data. It also provides a plugin system to support additional customization. It's fast for use in continuous integration pipelines and quickly executes on local-developer machines. It uses a "baseline file" approach, leveraging .secrets.baseline, that streamlines management of legitimate secrets and reduces false positives. This helps both new and established projects detect and prevent secrets from entering the code base.

    Use Cases:

    • Finding and preventing commits of sensitive information such as:
      • Username / passwords
      • High entropy strings
      • IP addresses
      • E-mail addresses
      • AWS sensitive information
    • Scanning local client repositories for exposed sensitive information before making them public.
    • Preventing secrets from being committed to a local repository using pre-commit hooks.
    • Implementing a safety net in continuous integration (CI) pipelines using GitHub Actions to catch inadvertent secret commits.
    • Streamlining the management of known secrets and false positives during codebase audits.

    Prerequisites

    To get the most out of detect-secrets, you'll need:

    • Python 3 with the pip tool installed.
    • (Optional) Familiarity with Python for potential custom plugin development.
    • (Optional) A GitHub repository supporting GitHub Actions.

    Quick Start

    1. Install slim-detect-secrets:

      ℹ️ Note: the SLIM project has customized the Detect Secrets tool to identify additional sensitive keywords such as IP addresses, file paths, and AWS information. These additions are currently under review by the detect-secrets team for merge into the tool's main codebase. Until then we recommend using our SLIM fork as described below.

      pip install git+https://github.com/NASA-AMMOS/slim-detect-secrets.git@exp
    2. Execute a baseline scan:

      detect-secrets scan --all-files --disable-plugin AbsolutePathDetectorExperimental --exclude-files '\.secrets.*' --exclude-files '\.git*' > .secrets.baseline
    3. Review the .secrets.baseline file for any detected secrets via an audit:

      detect-secrets audit .secrets.baseline

    ⬇️ Secrets detection .pre-commit-config.yml

    Download the file above to access the pre-commit configuration file, which includes a scan for sensitive information upon Git pushes. This file should be placed within your local Git repository after installing the pre-commit framework.

    Additional steps like whitelisting accepted values and false positives, establishing pre-commit hooks and/or enabling further automation are covered in detail below.


    Step-by-Step Guide

    There are three recommended layers of protection we suggest you enable to ensure comprehensive security. Please see below sections for further details.

    Layer 1: Full Scan and Audit (Client-side)

    This layer directly scans the developer's local environment using the detect-secrets tool. After scanning, a baseline file containing detected secrets is generated. Developers can audit this file for detailed information on detected secrets.

    Steps

    1. Installation

      • Install the experimental version of slim-detect-secrets.
        pip install git+https://github.com/NASA-AMMOS/slim-detect-secrets.git@exp
    2. Scanning

      • Scan all local files from the current directory and output results to a baseline file.
        detect-secrets scan --all-files --disable-plugin AbsolutePathDetectorExperimental --exclude-files '\.secrets.*' --exclude-files '\.git*' > .secrets.baseline
    3. Checking Results

      • View the results in the baseline file.
        cat .secrets.baseline
    4. Analysis

      • Analyze results using the audit tool.
        detect-secrets audit .secrets.baseline

    View more on Auditing Secrets in Baseline

    ℹ️ Note: If you've marked any secrets as true positives, make sure to remove all references to these secrets and rerun a full scan.

    Layer 2: Git Commit Scan (Client-side)

    This layer represents a prevention mechanism in the local developer environment that scans changes when a developer tries to commit; if new secrets are detected, the commit is blocked.

    To support this strategy, we recommend the installation of another third party tool called pre-commit, which is integral in allowing specialized plugins to run during the local developer's commit phase of using Git. It allows detect-secrets to prevent commits that are flagged with sensitive information.

    Steps

    1. Installation

    2. Configuration

      • Create a .pre-commit-config.yaml configuration file with the below contents.
        repos:
        - repo: https://github.com/NASA-AMMOS/slim-detect-secrets
        # using commit id for now, will change to tag when official version is released
        rev: 91e097ad4559ae6ab785c883dc5ed989202c7fbe
        hooks:
        - id: detect-secrets
        args:
        - '--baseline'
        - '.secrets.baseline'
        - '--exclude-files'
        - '\.git*'
        - '--exclude-files'
        - '\.secrets.*'
    3. Hook Installation

      • Install the pre-commit hook into your local environment, ensuring the hook gets invoked during local git commits.

        pre-commit install
    4. Committing Changes

      • Commit changes. If new secrets are detected, the commit will be blocked.

    ℹ️ Note: The pre-commit hook does not automatically update the .secrets.baseline file. Update it by re-running the scan command.

    Layer 3: Server-side Push to GitHub.com

    This strategy provides a final layer of protection by scanning server-side commits for sensitive information during pull request creation. It leverages the pre-commit tool and GitHub Action. The scan is triggered during a push or pull request and any detected new secrets are reported while blocking merges or pushes to protected branches.

    Steps

    1. Workflow Creation

      • The first step is to create a detect-secrets.yaml workflow file in the .github/workflows directory to define the GitHub action. Copy and paste the below while ensuring the correct branch of your codebase is referenced. For example (from the Slim Python Starter Kit):
      name: "Secret Detection"
      on:
      push:
      branches: [main]
      pull_request:
      # The branches below must be a subset of the branches above
      branches: [main]

      jobs:
      secret-detection:
      name: Secret-Detection
      runs-on: ubuntu-latest
      permissions:
      actions: write
      contents: read
      security-events: write
      steps:
      - name: Checkout repository
      uses: actions/checkout@v4
      with:
      fetch-depth: 0
      - name: Upgrade tooling
      run: |
      python3 -m pip install --upgrade pip
      pip install --upgrade git+https://github.com/NASA-AMMOS/slim-detect-secrets.git@exp
      pip install --upgrade jq
      - name: Create baseline config
      run: |
      if [ ! -f .secrets.baseline ] ;
      then
      # This generated baseline file will only be temporarily available on the GitHub side and will not appear in the user's local files.
      # Scanning an empty folder to generate an initial .secrets.baseline without secrets in the results.
      echo "⚠️ No existing .secrets.baseline file detected. Creating a new blank baseline file."
      mkdir empty-dir
      detect-secrets scan empty-dir > .secrets.baseline
      echo "✅ Blank .secrets.baseline file created successfully."
      rm -r empty-dir
      else
      echo "✅ Existing .secrets.baseline file detected. No new baseline file will be created."
      fi
      - name: Scan
      run: |
      # scripts scan repository for new secrets
      # backup list of known secrets
      cp -pr .secrets.baseline .secrets.new
      # find secrets in the repository
      detect-secrets scan --disable-plugin AbsolutePathDetectorExperimental --baseline .secrets.new \
      --exclude-files '\.secrets..*' \
      --exclude-files '\.git.*' \
      --exclude-files '\.mypy_cache' \
      --exclude-files '\.pytest_cache' \
      --exclude-files '\.tox' \
      --exclude-files '\.venv' \
      --exclude-files 'venv' \
      --exclude-files 'dist' \
      --exclude-files 'build' \
      --exclude-files '.*\.egg-info'
      # break build when new secrets discovered
      # function compares baseline/new secrets w/o listing results -- success(0) when new secret found
      compare_secrets() { diff <(jq -r '.results | keys[] as $key | "\($key),\(.[$key] | .[] | .hashed_secret)"' "${1}" | sort) <(jq -r '.results | keys[] as $key | "\($key),\(.[$key] | .[] | .hashed_secret)"' "${2}" | sort) | grep -q '>' ; }
      # test baseline versus new secret files
      if compare_secrets .secrets.baseline .secrets.new;
      then
      echo "⚠️ Attention Required! ⚠️" >&2
      echo "New secrets have been detected in your recent commit. Due to security concerns, we cannot display detailed information here and we cannot proceed until this issue is resolved." >&2
      echo "" >&2
      echo "Please follow the steps below on your local machine to reveal and handle the secrets:" >&2
      echo "" >&2
      echo "1️⃣ Run the 'detect-secrets' tool on your local machine. This tool will identify and clean up the secrets. You can find detailed instructions at this link: https://nasa-ammos.github.io/slim/continuous-testing/starter-kits/#detect-secrets" >&2
      echo "" >&2
      echo "2️⃣ After cleaning up the secrets, commit your changes and re-push your update to the repository." >&2
      echo "" >&2
      echo "Your efforts to maintain the security of our codebase are greatly appreciated!" >&2
      exit 1
      else
      echo "🟢 Secrets tests PASSED! 🟢" >&1
      echo "No new secrets were detected in comparison to any baseline configurations." >&1
      exit 0
      fi

      ℹ️ Explanation: The GitHub Action checks out code, installs necessary packages, checks for a baseline file, and scans the repository for secrets. If new secrets are detected, the build fails and provides guidance.

    After setting this up, GitHub will run the workflow during pushes or pull requests. If any new secrets are detected, the status check will fail and the user will be notified in the pull request.

    ⚠️ Warning: The check ensures specific lines of code that may contain sensitive information are not disclosed publicly. In GitHub Action logs only a yes/no indication of sensitive information appears. However, the surface area exists for potential attackers to readily identify sensitive information. Monitor your pull requests actively to respond and always ensure your team actively uses Layer 1 and Layer 2 to mitigate issues in the first place.


    Frequently Asked Questions (FAQ)

    • Q: If secrets are detected in my code, what should I do?

      A: Follow these steps:

      • Identify and Confirm: Review the identified secrets in the .secrets.baseline or any other report generated. Ensure that they are indeed secrets and not false positives.
      • Removal: Remove or replace all references to the detected secrets from your codebase. Ensure that no trace of the secret remains in the code, comments, or commit history. If you want to ignore the secret as a false positive during a pre-commit scan, you can follow directions here.
      • Rotation: If the detected secret was an API key, password, or any other form of authentication, consider it compromised. Rotate the secret immediately, i.e., generate a new secret/key and update it wherever required.
      • Rerun Scans: After you've made the necessary changes, run the detect-secrets tool again to ensure no secrets remain.
      • Commit Safely: When you're sure all secrets have been removed, you can safely commit your changes. Remember, the Git commit scan (Layer 2) and the server-side push scan (Layer 3) will provide additional layers of checks.
      • Educate and Prevent: To avoid such instances in the future, educate your team on the importance of not committing secrets and the potential risks associated with it. Consider adopting practices or tools that prevent the accidental inclusion of secrets in your codebase.
    • Q: Does detect-secrets scan the entire Git history?

      A: No, it's designed to scan the current state of a project for efficiency.

    • Q: How are commits containing secrets removed permanently from Git history?

      A: The process of scrubbing errant commits and their content involves a destructive rewrite of repository commit history. Backups are essential and changes must be handled with precision and caution. One solution is to start a new repository from scratch with only the latest cleaned code, thereby negating the need to change existing repository content. Solutions involving more entropy include Git filter commands or well-known cleaning applications, such as BFG Repo-Cleaner. Because of the risks involved in mutating repository history and content, such changes always must be handled with backups, expertise and extreme care.

    • Q: Where can I find more configurations and options for detect-secrets?

      A: Refer to the official documentation for detect-secrets and pre-commit.


    Credits

    Authorship:

    Acknowledgements:


    Feedback and Contributions

    We value your feedback and contributions. Enhance and expand this guide by referring to our contribution guidelines.

    - + \ No newline at end of file diff --git a/docs/join/index.html b/docs/join/index.html index 20242936..600f207a 100644 --- a/docs/join/index.html +++ b/docs/join/index.html @@ -11,14 +11,14 @@ - +

    Join

    SLIM best practice guides and recommendations are community generated, which means you can join our community!

    Some ways you can get involved with SLIM:

    Use Our Guides

    The fastest way to join the SLIM community is to start using our best practice guides in your project(s).

    As you use our best practices, we always recommend working with your project by creating pull-requests. An excellent way to automate the infusion of our best practices into your repositories (especially if you have many) is to use a tool like multi-gitter to create pull-requests, if you're using Git.

    Some Pro ⭐️ tips:

    • Add the following badge to your README.md so that your other team members can help: SLIM
    • 👀 Watch our repo so you get updates on activities right away. In particular, ensure your notifications are set up to monitor our Releases page for key updates.

    Join Our Community of Projects

    What does it mean to join the SLIM community as a member project? It means the SLIM project will reach out to you about new updates and best practices and offer automated pull-requests for select best practices to be sent to your project's repositories (at no cost!). We welcome new project members who are keen on improving software lifecycle processes within their projects.

    For instance:

    • See our current community members in our About page here.
    • See an example of automated pull requests in-action for our community member repos here.

    Here's how you can tell us you're interested in joining:

    1. Publicly: Start a Discussion: Head over to our discussion forum and file a new thread expressing your interest in joining SLIM. This is a great way to introduce your project and outline how you think SLIM could benefit you.

    2. Privately: Contact Us: You can directly contact @riverma or the @slim-steering group on GitHub to express your desire to join. We'll have a chat, and try to identify which repositories in your project make sense for best practice infusion as well as identify a point-of-contact.

    Contribute to Our Guides

    We'd be delighted to see your contribution! Please see our Contributing Guide on details for how to contribute. We accept many non-code contributions as well, so feel free to think creatively.

    Spread the Word

    The more people & projects using, contributing, and maintaining SLIM, the more robust and long-term this effort will last.

    You can help by:

    Socializing on GitHub

    ⭐ Star our Repo

    👀 Watch our Repo

    😀 Discuss our Repo

    Website

    Share our website (https://nasa-ammos.github.io/slim) with your friends and colleagues

    - + \ No newline at end of file diff --git a/index.html b/index.html index 846aa909..33cd07d0 100644 --- a/index.html +++ b/index.html @@ -11,13 +11,13 @@ - +

    A community-resource for exchanging and implementing best practices in software lifecycle improvements.

    Our Scope

    We focus on best practices related to software project governance, documentation, and development life-cycles.

    Community Based

    We solicit improvement ideas and solutions from our community and deliver best practices back to our members. See our community planning board.

    Open Source

    We develop best practices through standards-as-code. We iteratively improve our recommendations through open source tickets and pull requests.

    - + \ No newline at end of file diff --git a/markdown-page/index.html b/markdown-page/index.html index efeae0e1..4e164627 100644 --- a/markdown-page/index.html +++ b/markdown-page/index.html @@ -11,13 +11,13 @@ - +

    Markdown page example

    You don't need React to write simple standalone pages.

    - + \ No newline at end of file