diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml index 36b6e4f6..c0b00096 100644 --- a/.github/workflows/cmake-ctest.yml +++ b/.github/workflows/cmake-ctest.yml @@ -8,6 +8,11 @@ on: description: "The hdf5 base name of the binaries" required: true type: string + snap_name: + description: 'The name in the source tarballs' + type: string + required: false + default: hdfsrc file_base: description: "The common base name of the source tarballs" required: true @@ -35,17 +40,29 @@ jobs: - name: Install Dependencies (Windows) run: choco install ninja - - uses: actions/checkout@v3 - - name: Enable Developer Command Prompt - uses: ilammy/msvc-dev-cmd@v1.12.1 + uses: ilammy/msvc-dev-cmd@v1.13.0 + + - name: Set file base name (Windows) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + shell: bash - name: Get hdf5 release if: ${{ (inputs.use_environ == 'snapshots') }} uses: dsaltares/fetch-gh-release-asset@master with: repo: 'HDFGroup/hdf5' - version: 'tags/snapshot' + version: 'tags/snapshot-1.14' file: '${{ inputs.use_hdf }}-win-vs2022_cl.zip' - name: Get hdf5 release @@ -87,21 +104,7 @@ jobs: - name: List files for the binaries (Win) run: | - ls -l ${{ github.workspace }}/HDF_Group/HDF5 - - - name: Set file base name (Windows) - id: set-file-base - run: | - FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") - echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT - if [[ '${{ inputs.use_environ }}' == 'snapshots' ]] - then - SOURCE_NAME_BASE=$(echo "hdfsrc") - else - SOURCE_NAME_BASE=$(echo "$FILE_NAME_BASE") - fi - echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT - shell: bash + ls -l ${{ github.workspace }}/HDF_Group/HDF5 # Get files created by release script - name: Get zip-tarball (Windows) @@ -139,7 +142,7 @@ jobs: mkdir "${{ runner.workspace }}/build" mkdir "${{ runner.workspace }}/build/hdf5_plugins" Copy-Item -Path ${{ runner.workspace }}/hdf5_plugins/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING -Destination ${{ runner.workspace }}/build/hdf5_plugins/ - Copy-Item -Path ${{ runner.workspace }}/hdf5_plugins/hdfsrc/README.md -Destination ${{ runner.workspace }}/build/hdf5_plugins/ + Copy-Item -Path ${{ runner.workspace }}/hdf5_plugins/build/${{ inputs.preset_name }}-MSVC/README.txt -Destination ${{ runner.workspace }}/build/hdf5_plugins/ Copy-Item -Path ${{ runner.workspace }}/hdf5_plugins/build/${{ inputs.preset_name }}-MSVC/* -Destination ${{ runner.workspace }}/build/hdf5_plugins/ -Include *.zip cd "${{ runner.workspace }}/build" 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip hdf5_plugins @@ -166,16 +169,29 @@ jobs: runs-on: ubuntu-latest steps: - name: Install CMake Dependencies (Linux) - run: sudo apt-get install ninja-build doxygen graphviz + run: | + sudo apt-get update + sudo apt-get install ninja-build - - uses: actions/checkout@v3 + - name: Set file base name (Linux) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT - name: 
Get hdf5 release if: ${{ (inputs.use_environ == 'snapshots') }} uses: dsaltares/fetch-gh-release-asset@master with: repo: 'HDFGroup/hdf5' - version: 'tags/snapshot' + version: 'tags/snapshot-1.14' file: '${{ inputs.use_hdf }}-ubuntu-2204_gcc.tar.gz' - name: Get hdf5 release @@ -188,8 +204,8 @@ jobs: - name: List files for the space (Linux) run: | - ls -l ${{ github.workspace }} - ls ${{ runner.workspace }} + ls -l ${{ github.workspace }} + ls ${{ runner.workspace }} - name: Uncompress gh binary (Linux) run: tar -zxvf ${{ github.workspace }}/${{ inputs.use_hdf }}-ubuntu-2204_gcc.tar.gz @@ -208,20 +224,7 @@ jobs: - name: List files for the binaries (Linux) run: | - ls -l ${{ github.workspace }}/hdf5/HDF_Group/HDF5 - - - name: Set file base name (Linux) - id: set-file-base - run: | - FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") - echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT - if [[ '${{ inputs.use_environ }}' == 'snapshots' ]] - then - SOURCE_NAME_BASE=$(echo "hdfsrc") - else - SOURCE_NAME_BASE=$(echo "$FILE_NAME_BASE") - fi - echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + ls -l ${{ github.workspace }}/hdf5/HDF_Group/HDF5 # Get files created by release script - name: Get tgz-tarball (Linux) @@ -252,7 +255,7 @@ jobs: mkdir "${{ runner.workspace }}/build" mkdir "${{ runner.workspace }}/build/hdf5_plugins" cp ${{ runner.workspace }}/hdf5_plugins/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING ${{ runner.workspace }}/build/hdf5_plugins - cp ${{ runner.workspace }}/hdf5_plugins/hdfsrc/README.md ${{ runner.workspace }}/build/hdf5_plugins + cp ${{ runner.workspace }}/hdf5_plugins/build/${{ inputs.preset_name }}-GNUC/README.txt ${{ runner.workspace }}/build/hdf5_plugins cp ${{ runner.workspace }}/hdf5_plugins/build/${{ inputs.preset_name }}-GNUC/*.tar.gz ${{ runner.workspace }}/build/hdf5_plugins cd "${{ runner.workspace }}/build" tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz hdf5_plugins @@ -275,19 +278,30 @@ jobs: # MacOS w/ Clang + CMake # name: "MacOS Clang CMake" - runs-on: macos-11 + runs-on: macos-13 steps: - name: Install Dependencies (MacOS) - run: brew install ninja doxygen + run: brew install ninja - - uses: actions/checkout@v3 + - name: Set file base name (MacOS) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT - name: Get hdf5 release if: ${{ (inputs.use_environ == 'snapshots') }} uses: dsaltares/fetch-gh-release-asset@master with: repo: 'HDFGroup/hdf5' - version: 'tags/snapshot' + version: 'tags/snapshot-1.14' file: '${{ inputs.use_hdf }}-osx12.tar.gz' - name: Get hdf5 release @@ -300,8 +314,8 @@ jobs: - name: List files for the space (MacOS) run: | - ls -l ${{ github.workspace }} - ls ${{ runner.workspace }} + ls -l ${{ github.workspace }} + ls ${{ runner.workspace }} - name: Uncompress gh binary (MacOS) run: tar -zxvf ${{ github.workspace }}/${{ inputs.use_hdf }}-osx12.tar.gz @@ -320,20 +334,7 @@ jobs: - name: List files for the binaries (MacOS) run: | - ls -l ${{ github.workspace }}/hdf5/HDF_Group/HDF5 - - - name: Set file base name (MacOS) - id: set-file-base - run: | - FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") - echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT - if [[ '${{ inputs.use_environ }}' == 'snapshots' ]] - then 
- SOURCE_NAME_BASE=$(echo "hdfsrc") - else - SOURCE_NAME_BASE=$(echo "$FILE_NAME_BASE") - fi - echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + ls -l ${{ github.workspace }}/hdf5/HDF_Group/HDF5 # Get files created by release script - name: Get tgz-tarball (MacOS) @@ -365,7 +366,7 @@ jobs: mkdir "${{ runner.workspace }}/build" mkdir "${{ runner.workspace }}/build/hdf5_plugins" cp ${{ runner.workspace }}/hdf5_plugins/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING ${{ runner.workspace }}/build/hdf5_plugins - cp ${{ runner.workspace }}/hdf5_plugins/hdfsrc/README.md ${{ runner.workspace }}/build/hdf5_plugins + cp ${{ runner.workspace }}/hdf5_plugins/build/${{ inputs.preset_name }}-Clang/README.txt ${{ runner.workspace }}/build/hdf5_plugins cp ${{ runner.workspace }}/hdf5_plugins/build/${{ inputs.preset_name }}-Clang/*.tar.gz ${{ runner.workspace }}/build/hdf5_plugins cd "${{ runner.workspace }}/build" tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-osx12.tar.gz hdf5_plugins @@ -383,4 +384,3 @@ jobs: name: tgz-osx12-binary path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-osx12.tar.gz if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` - diff --git a/.github/workflows/daily-build.yml b/.github/workflows/daily-build.yml index 97623b20..d290fe17 100644 --- a/.github/workflows/daily-build.yml +++ b/.github/workflows/daily-build.yml @@ -8,13 +8,8 @@ on: type: string required: false default: check - use_environ: - description: 'Environment to locate files' - type: string - required: false - default: snapshots pull_request: - branches: [ "master" ] + branches: [ "release/1_14_4" ] permissions: contents: read @@ -22,12 +17,12 @@ permissions: # A workflow run is made up of one or more jobs that can run sequentially or # in parallel. jobs: - get-base-names: + get-old-names: runs-on: ubuntu-latest outputs: hdf5-name: ${{ steps.gethdf5base.outputs.HDF5_NAME_BASE }} + plugin-name: ${{ steps.getpluginbase.outputs.PLUGIN_NAME_BASE }} run-ignore: ${{ steps.getinputs.outputs.INPUTS_IGNORE }} - run-environ: ${{ steps.getinputs.outputs.INPUTS_ENVIRON }} steps: - uses: actions/checkout@v4.1.1 @@ -36,44 +31,56 @@ jobs: uses: dsaltares/fetch-gh-release-asset@master with: repo: 'HDFGroup/hdf5' - version: 'tags/snapshot' + version: 'tags/snapshot-1.14' file: 'last-file.txt' - - name: Read base-name file + - name: Read HDF5 base-name file id: gethdf5base run: echo "HDF5_NAME_BASE=$(cat last-file.txt)" >> $GITHUB_OUTPUT - run: echo "hdf5 base name is ${{ steps.gethdf5base.outputs.HDF5_NAME_BASE }}." + - name: Get plugin release base name + uses: dsaltares/fetch-gh-release-asset@master + with: + repo: 'HDFGroup/hdf5_plugins' + version: 'tags/snapshot-1.14' + file: 'last-file.txt' + continue-on-error: true + + - name: Read base-name file + id: getpluginbase + run: echo "PLUGIN_NAME_BASE=$(cat last-file.txt)" >> $GITHUB_OUTPUT + + - run: echo "plugin base name is ${{ steps.getpluginbase.outputs.PLUGIN_NAME_BASE }}." + - name: Read inputs id: getinputs run: | echo "INPUTS_IGNORE=${{ ((github.event.inputs.use_ignore == '' && github.event.inputs.use_ignore) || 'ignore') }}" >> $GITHUB_OUTPUT - echo "INPUTS_ENVIRON=${{ ((github.event.inputs.use_environ == '' && github.event.inputs.use_environ) || 'snapshots') }}" >> $GITHUB_OUTPUT - run: echo "use_ignore is ${{ steps.getinputs.outputs.INPUTS_IGNORE }}." - - run: echo "use_environ is ${{ steps.getinputs.outputs.INPUTS_ENVIRON }}." 
- call-workflow-tarball: - needs: get-base-names + needs: [get-old-names] uses: ./.github/workflows/tarball.yml with: - use_ignore: ${{ needs.get-base-names.outputs.run-ignore }} - use_environ: ${{ needs.get-base-names.outputs.run-environ }} + use_tag: snapshot-1.14 + use_environ: snapshots call-workflow-ctest: - needs: [get-base-names, call-workflow-tarball] + needs: [get-old-names, call-workflow-tarball] uses: ./.github/workflows/cmake-ctest.yml with: - file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} preset_name: ci-StdShar - use_hdf: ${{ needs.get-base-names.outputs.hdf5-name }} - use_environ: ${{ needs.get-base-names.outputs.run-environ }} - if: ${{ ((needs.get-base-names.outputs.run-environ == 'snapshots') && ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-base-names.outputs.run-ignore == 'ignore'))) || (needs.get-base-names.outputs.run-environ == 'release') }} + file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} + use_hdf: ${{ needs.get-old-names.outputs.hdf5-name }} +# use_tag: snapshot-1.14 + use_environ: snapshots + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-base-names.outputs.run-ignore == 'ignore')) }} call-workflow-release: - needs: [get-base-names, call-workflow-tarball, call-workflow-ctest] + needs: [get-old-names, call-workflow-tarball, call-workflow-ctest] permissions: contents: write # In order to allow tag creation uses: ./.github/workflows/release-files.yml @@ -81,7 +88,18 @@ jobs: file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} file_branch: ${{ needs.call-workflow-tarball.outputs.file_branch }} file_sha: ${{ needs.call-workflow-tarball.outputs.file_sha }} - use_tag: snapshot - use_environ: ${{ needs.get-base-names.outputs.run-environ }} - if: ${{ ((needs.get-base-names.outputs.run-environ == 'snapshots') && ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-base-names.outputs.run-ignore == 'ignore'))) || (needs.get-base-names.outputs.run-environ == 'release') }} + use_tag: snapshot-1.14 + use_environ: snapshots + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-base-names.outputs.run-ignore == 'ignore')) }} + + call-workflow-remove: + needs: [get-old-names, call-workflow-tarball, call-workflow-ctest, call-workflow-release] + permissions: + contents: write # In order to allow file deletion + uses: ./.github/workflows/remove-files.yml + with: + file_base: ${{ needs.get-old-names.outputs.plugin-name }} + use_tag: snapshot-1.14 + use_environ: snapshots + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-base-names.outputs.run-ignore == 'ignore')) }} diff --git a/.github/workflows/release-files.yml b/.github/workflows/release-files.yml index c4f724e0..c4d16efb 100644 --- a/.github/workflows/release-files.yml +++ b/.github/workflows/release-files.yml @@ -8,7 +8,7 @@ on: description: 'Release version tag' type: string required: false - default: snapshot + default: snapshot-1.14 use_environ: description: 'Environment to locate files' type: string @@ -55,7 +55,7 @@ jobs: commit_sha: ${{ inputs.file_sha }} tag: "${{ inputs.use_tag }}" force_push_tag: true - message: "Latest snapshot" + message: "Latest snapshot-1.14" if: ${{ inputs.use_environ == 'snapshots' }} # Print result using the action output. 
@@ -109,21 +109,20 @@ jobs: - name: Create sha256 sums for files run: | - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.tar.gz >> sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.zip >> sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-osx12.tar.gz >> sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz >> sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip >> sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-osx12.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - name: Store snapshot name - if: ${{ (inputs.use_environ == 'snapshots') }} run: | echo "${{ steps.get-file-base.outputs.FILE_BASE }}" > ./last-file.txt - name: PreRelease tag id: create_prerelease if: ${{ (inputs.use_environ == 'snapshots') }} - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1 + uses: softprops/action-gh-release@9d7c94cfd0a1f3ed45544c887983e9fa900f0564 # v2.0.4 with: tag_name: "${{ inputs.use_tag }}" prerelease: true @@ -134,13 +133,13 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}-osx12.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip - sha256sums.txt + ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` - name: Release tag id: create_release if: ${{ (inputs.use_environ == 'release') }} - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1 + uses: softprops/action-gh-release@9d7c94cfd0a1f3ed45544c887983e9fa900f0564 # v2.0.4 with: tag_name: "${{ inputs.use_tag }}" prerelease: false @@ -150,7 +149,7 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}-osx12.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip - sha256sums.txt + ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` - name: List files for the space (Linux) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8e1c6ca8..d883c5dc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,24 +1,18 @@ -name: hdf5 plugins release build +name: hdf5 1.14 plugins release build -# Controls when the action will run. 
Triggers the workflow on a manual run on: workflow_dispatch: inputs: - use_tag: + hdf_tag: description: 'Release hdf5 version tag' type: string required: false - default: snapshot - use_name: - description: 'Plugin Release Name' - type: string - required: false - default: hdf5_plugins-snapshot - use_environ: - description: 'Environment to locate files' + default: snapshot-1.14 + use_tag: + description: 'Release version tag' type: string required: false - default: snapshots + default: snapshot-1.14 permissions: contents: read @@ -29,83 +23,44 @@ jobs: log-the-inputs: runs-on: ubuntu-latest outputs: + hdf_tag: ${{ steps.get-tag-name.outputs.HDF_TAG }} rel_tag: ${{ steps.get-tag-name.outputs.RELEASE_TAG }} - rel_name: ${{ steps.get-tag-name.outputs.RELEASE_NAME }} - run-environ: ${{ steps.get-tag-name.outputs.INPUTS_ENVIRON }} - steps: - name: Get tag name id: get-tag-name env: + HDFTAG: ${{ inputs.hdf_tag }} TAG: ${{ inputs.use_tag }} - TAG_NAME: ${{ inputs.use_name }} run: | + echo "HDF_TAG=$HDFTAG" >> $GITHUB_OUTPUT echo "RELEASE_TAG=$TAG" >> $GITHUB_OUTPUT - echo "RELEASE_NAME=$TAG_NAME" >> $GITHUB_OUTPUT - echo "INPUTS_ENVIRON=${{ ((github.event.inputs.use_environ == '' && github.event.inputs.use_environ) || 'snapshots') }}" >> $GITHUB_OUTPUT - create-files-ctest: + call-workflow-tarball: needs: log-the-inputs - runs-on: ubuntu-latest - outputs: - file_base: ${{ steps.set-file-base.outputs.FILE_BASE }} - steps: - - name: Set file base name - id: set-file-base - run: | - FILE_NAME_BASE=$(echo "${{ needs.log-the-inputs.outputs.rel_name }}") - echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT - shell: bash - - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - name: Get Sources - uses: actions/checkout@v3 - with: - path: hdfsrc - - - name: Zip Folder - run: | - zip -r ${{ steps.set-file-base.outputs.FILE_BASE }}.zip ./hdfsrc - tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz ./hdfsrc - - - name: List files in the repository - run: | - ls -l ${{ github.workspace }} - ls $GITHUB_WORKSPACE - - # Save files created by release script - - name: Save tgz-tarball - uses: actions/upload-artifact@v4 - with: - name: tgz-tarball - path: ${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz - if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` - - - name: Save zip-tarball - uses: actions/upload-artifact@v4 - with: - name: zip-tarball - path: ${{ steps.set-file-base.outputs.FILE_BASE }}.zip - if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + uses: ./.github/workflows/tarball.yml + with: + use_tag: 1.14.4 + use_environ: release call-workflow-ctest: - needs: [log-the-inputs, create-files-ctest] + needs: [log-the-inputs, call-workflow-tarball] uses: ./.github/workflows/cmake-ctest.yml with: - file_base: ${{ needs.create-files-ctest.outputs.file_base }} preset_name: ci-StdShar - use_hdf: ${{ needs.log-the-inputs.outputs.rel_tag }} - use_environ: ${{ needs.log-the-inputs.outputs.run-environ }} + file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} + use_hdf: ${{ needs.log-the-inputs.outputs.hdf_tag }} + snap_name: hdf5_plugins-${{ needs.call-workflow-tarball.outputs.source_base }} + use_environ: release call-workflow-release: - needs: [log-the-inputs, create-files-ctest, call-workflow-ctest] + needs: [log-the-inputs, call-workflow-tarball, call-workflow-ctest] permissions: contents: write # In order to allow tag creation uses: ./.github/workflows/release-files.yml with: - file_base: 
${{ needs.create-files-ctest.outputs.file_base }} - file_branch: ${{ needs.log-the-inputs.outputs.rel_name }} - file_sha: ${{ needs.log-the-inputs.outputs.rel_tag }} + file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} + file_branch: ${{ needs.call-workflow-tarball.outputs.file_branch }} + file_sha: ${{ needs.call-workflow-tarball.outputs.file_sha }} use_tag: ${{ needs.log-the-inputs.outputs.rel_tag }} - use_environ: ${{ needs.log-the-inputs.outputs.run-environ }} + use_environ: release diff --git a/.github/workflows/remove-files.yml b/.github/workflows/remove-files.yml new file mode 100644 index 00000000..ac7c9e04 --- /dev/null +++ b/.github/workflows/remove-files.yml @@ -0,0 +1,54 @@ +name: hdf5 plugins remove-files + +# Controls when the action will run. Triggers the workflow on a schedule +on: + workflow_call: + inputs: + use_tag: + description: 'Release version tag' + type: string + required: false + default: snapshot-1.14 + use_environ: + description: 'Environment to locate files' + type: string + required: true + default: snapshots + file_base: + description: "The common base name of the source tarballs" + required: true + type: string + +# Minimal permissions to be inherited by any job that doesn't declare its own permissions +permissions: + contents: read + +# Previous workflows must pass to get here so tag the commit that created the files +jobs: + PreRelease-delfiles: + runs-on: ubuntu-latest + environment: ${{ inputs.use_environ }} + permissions: + contents: write + steps: + - name: Get file base name + id: get-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + + - name: PreRelease delete from tag + id: delete_prerelease + if: ${{ (inputs.use_environ == 'snapshots') }} + uses: mknejp/delete-release-assets@v1 + with: + token: ${{ github.token }} + tag: "${{ inputs.use_tag }}" + assets: | + ${{ steps.get-file-base.outputs.FILE_BASE }}.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-osx12.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + \ No newline at end of file diff --git a/.github/workflows/tarball.yml b/.github/workflows/tarball.yml index 85f8b90a..eccbd83f 100644 --- a/.github/workflows/tarball.yml +++ b/.github/workflows/tarball.yml @@ -4,11 +4,11 @@ name: hdf5 plugins tarball on: workflow_call: inputs: - use_ignore: - description: 'Ignore has changes check' + use_tag: + description: 'Release version tag' type: string required: false - default: check + default: snapshot-1.14 use_environ: description: 'Environment to locate files' type: string @@ -18,6 +18,9 @@ on: has_changes: description: "Whether there were changes the previous day" value: ${{ jobs.check_commits.outputs.has_changes }} + source_base: + description: "The common base name of the source tarballs" + value: ${{ jobs.create_tarball.outputs.source_base }} file_base: description: "The common base name of the source tarballs" value: ${{ jobs.create_tarball.outputs.file_base }} @@ -66,7 +69,7 @@ jobs: with: seconds: 86400 # One day in seconds branch: '${{ steps.get-branch-name.outputs.branch_ref }}' - if: ${{ (inputs.use_environ == 'snapshots' && inputs.use_ignore == 'check') }} + if: ${{ inputs.use_environ == 'snapshots' }} - run: echo "You have ${{ steps.check-new-commits.outputs.new-commits-number }} new commit(s) in 
${{ steps.get-branch-name.outputs.BRANCH_REF }} ✅!" if: ${{ steps.check-new-commits.outputs.has-new-commits == 'true' }} @@ -77,9 +80,10 @@ jobs: name: Create a source tarball runs-on: ubuntu-latest needs: check_commits - if: ${{ ((inputs.use_environ == 'snapshots') && ((needs.check_commits.outputs.has_changes == 'true') || (inputs.use_ignore == 'ignore'))) || (inputs.use_environ == 'release') }} + if: ${{ ((inputs.use_environ == 'snapshots') && (needs.check_commits.outputs.has_changes == 'true')) || (inputs.use_environ == 'release') }} outputs: file_base: ${{ steps.set-file-base.outputs.FILE_BASE }} + source_base: ${{ steps.version.outputs.SOURCE_TAG }} steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - name: Get Sources @@ -91,16 +95,21 @@ jobs: id: version run: | cd "$GITHUB_WORKSPACE/hdfsrc" - echo "TAG_VERSION=master" >> $GITHUB_OUTPUT + echo "SOURCE_TAG=${{ inputs.use_tag }}" >> $GITHUB_OUTPUT - name: Set file base name id: set-file-base run: | - if [[ '${{ inputs.use_environ }}' == 'snapshots' && '${{ needs.check_commits.outputs.has_changes }}' == 'true' ]] + if [[ '${{ inputs.use_environ }}' == 'snapshots' ]] then FILE_NAME_BASE=$(echo "hdf5_plugins-${{ needs.check_commits.outputs.branch_ref }}-${{ needs.check_commits.outputs.branch_sha }}") else - FILE_NAME_BASE=$(echo "hdf5_plugins-${{ steps.version.outputs.TAG_VERSION }}") + if [[ '${{ inputs.use_tag }}' == 'snapshot-1.14' ]] + then + FILE_NAME_BASE=$(echo "snapshot-1.14") + else + FILE_NAME_BASE=$(echo "hdf5_plugins-${{ steps.version.outputs.SOURCE_TAG }}") + fi fi echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT shell: bash @@ -112,11 +121,21 @@ jobs: - name: Create source file (tgz and zip) id: create-files + if: ${{ (inputs.use_environ == 'snapshots') }} run: | zip -r ${{ steps.set-file-base.outputs.FILE_BASE }}.zip ./hdfsrc tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz ./hdfsrc shell: bash + - name: Create release source file (tgz and zip) + id: create-rel-files + if: ${{ (inputs.use_environ == 'release') }} + run: | + mv hdfsrc ${{ steps.set-file-base.outputs.FILE_BASE }} + zip -r ${{ steps.set-file-base.outputs.FILE_BASE }}.zip ./${{ steps.set-file-base.outputs.FILE_BASE }} + tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz ./${{ steps.set-file-base.outputs.FILE_BASE }} + shell: bash + - name: List files in the repository run: | ls -l ${{ github.workspace }} diff --git a/Building.txt b/Building.txt index 0610fad6..a668dd70 100644 --- a/Building.txt +++ b/Building.txt @@ -1,4 +1,4 @@ -HDF5 plugins +HDF5 1.14.4 plugins Building these filter/example requires knowledge of the hdf5 and the compression library installation. Out-of-source build process is expected. 
diff --git a/CMakeLists.txt b/CMakeLists.txt index c8c89a21..7039d805 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,14 +125,16 @@ endif () # LZ4 filter if (NOT MINGW) - if (WIN32 AND MSVC_VERSION GREATER_EQUAL 1930) - if (NOT CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" AND NOT CMAKE_C_COMPILER_ID MATCHES "Intel[Ll][Ll][Vv][Mm]") - FILTER_OPTION (LZ4) + if (WIN32) + if (NOT CMAKE_C_COMPILER_ID MATCHES "Intel[Ll][Ll][Vv][Mm]") + if (MSVC_VERSION GREATER_EQUAL 1930 AND NOT CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + FILTER_OPTION (LZ4) + else () + set (ENABLE_LZ4 OFF CACHE BOOL "" FORCE) + endif () else () set (ENABLE_LZ4 OFF CACHE BOOL "" FORCE) endif () - elseif (WIN32 AND NOT CMAKE_C_COMPILER_ID MATCHES "Intel[Ll][Ll][Vv][Mm]") - FILTER_OPTION (LZ4) else () FILTER_OPTION (LZ4) #set (ENABLE_LZ4 OFF CACHE BOOL "" FORCE) diff --git a/CMakePresets.json b/CMakePresets.json index 50ed4972..423f3754 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -10,8 +10,8 @@ "inherits": "ci-base", "cacheVariables": { "H5PL_VERS_MAJOR": {"type": "STRING", "value": "1"}, - "H5PL_VERS_MINOR": {"type": "STRING", "value": "15"}, - "H5PL_VERS_RELEASE": {"type": "STRING", "value": "0"}, + "H5PL_VERS_MINOR": {"type": "STRING", "value": "14"}, + "H5PL_VERS_RELEASE": {"type": "STRING", "value": "4"}, "H5PL_ALLOW_EXTERNAL_SUPPORT": {"type": "STRING", "value": "TGZ"}, "H5PL_COMP_TGZPATH": {"type": "STRING", "value": "${sourceDir}/libs"}, "H5PL_BUILD_TESTING": "ON", @@ -22,7 +22,7 @@ "name": "ci-base-plugins", "hidden": true, "cacheVariables": { - "PLUGIN_TGZ_NAME": {"type": "STRING", "value": "hdf5_plugins-master.tar.gz"}, + "PLUGIN_TGZ_NAME": {"type": "STRING", "value": "hdf5_plugins-1.14.4.tar.gz"}, "PLUGIN_PACKAGE_NAME": {"type": "STRING", "value": "pl"}, "PL_PACKAGE_NAME": "pl", "HDF5_NAMESPACE": {"type": "STRING", "value": "hdf5::"}, diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index befe8b2d..7aa58787 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -34,8 +34,8 @@ set_property (CACHE H5PL_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) set (H5PL_GIT_URL "https://github.com/HDFGroup/h5plugin.git" CACHE STRING "Use plugins from HDF repository" FORCE) set (H5PL_GIT_BRANCH "master" CACHE STRING "" FORCE) -set (H5PL_TGZ_ORIGPATH "https://github.com/HDFGroup/hdf5_plugins/releases/download/snapshots" CACHE STRING "Use PLUGINS from original location" FORCE) -set (H5PL_TGZ_NAME "hdf5_plugins-master.tar.gz" CACHE STRING "Use plugins from compressed file" FORCE) +set (H5PL_TGZ_ORIGPATH "https://github.com/HDFGroup/hdf5_plugins/releases/download/snapshot-1.14" CACHE STRING "Use PLUGINS from original location" FORCE) +set (H5PL_TGZ_NAME "hdf5_plugins-1_14_4.tar.gz" CACHE STRING "Use plugins from compressed file" FORCE) set (PL_PACKAGE_NAME "pl" CACHE STRING "Name of plugins package" FORCE) set (H5PL_CPACK_ENABLE ON CACHE BOOL "Enable the CPACK include and components" FORCE) diff --git a/docs/RegisteredFilterPlugins.md b/docs/RegisteredFilterPlugins.md index 39b877b8..bff188fb 100644 --- a/docs/RegisteredFilterPlugins.md +++ b/docs/RegisteredFilterPlugins.md @@ -21,33 +21,677 @@ List of Filters Registered with The HDF Group --------------------------------------------- |Filter |Identifier Name |Short Description| |--------|----------------|---------------------| -|305 |LZO |LZO lossless compression used by PyTables| -|307 |BZIP2 |BZIP2 lossless compression used by PyTables| -|32000 |LZF |LZF lossless compression used by H5Py project| -|32001 |BLOSC 
|Blosc lossless compression used by PyTables| -|32002 |MAFISC |Modified LZMA compression filter, MAFISC (Multidimensional Adaptive Filtering Improved Scientific data Compression)| -|32003 |Snappy |Snappy lossless compression| -|32004 |LZ4 |LZ4 fast lossless compression algorithm| -|32005 |APAX |Samplify’s APAX Numerical Encoding Technology| -|32006 |CBF |All imgCIF/CBF compressions and decompressions, including Canonical, Packed, Packed Version 2, Byte Offset and Nibble Offset| -|32007 |JPEG-XR |Enables images to be compressed/decompressed with JPEG-XR compression| -|32008 |bitshuffle |Extreme version of shuffle filter that shuffles data at bit level instead of byte level| -|32009 |SPDP |SPDP fast lossless compression algorithm for single- and double-precision floating-point data| -|32010 |LPC-Rice |LPC-Rice multi-threaded lossless compression| -|32011 |CCSDS-123 |ESA CCSDS-123 multi-threaded compression filter| -|32012 |JPEG-LS |CharLS JPEG-LS multi-threaded compression filter| -|32013 |zfp |Lossy & lossless compression of floating point and integer datasets to meet rate, accuracy, and/or precision targets.| -|32014 |fpzip |Fast and Efficient Lossy or Lossless Compressor for Floating-Point Data| -|32015 |Zstandard |Real-time compression algorithm with wide range of compression / speed trade-off and fast decoder| -|32016 |B³D |GPU based image compression method developed for light-microscopy applications| -|32017 |SZ |An error-bounded lossy compressor for scientific floating-point data| -|32018 |FCIDECOMP |EUMETSAT CharLS compression filter for use with netCDF| -|32019 |JPEG |Jpeg compression filter| -|32020 |VBZ |Compression filter for raw dna signal data used by Oxford Nanopore| -|32021 |FAPEC | Versatile and efficient data compressor supporting many kinds of data and using an outlier-resilient entropy coder| -|32022 |BitGroom |The BitGroom quantization algorithm| -|32023 |Granular |BitRound (GBR) The GBG quantization algorithm is a significant improvement to the BitGroom filter| -|32024 |SZ3 |A modular error-bounded lossy compression framework for scientific datasets| -|32025 |Delta-Rice |Lossless compression algorithm optimized for digitized analog signals based on delta encoding and rice coding| -|32026 |BLOSC |The recent new-generation version of the Blosc compression library| -|32027 |FLAC |FLAC audio compression filter in HDF5| +|305 |LZO |LZO lossless compression used by PyTables| +|307 |BZIP2 |BZIP2 lossless compression used by PyTables| +|32000 |LZF |LZF lossless compression used by H5Py project| +|32001 |BLOSC |Blosc lossless compression used by PyTables| +|32002 |MAFISC |Modified LZMA compression filter, MAFISC (Multidimensional Adaptive Filtering Improved Scientific data Compression)| +|32003 |Snappy |Snappy lossless compression| +|32004 |LZ4 |LZ4 fast lossless compression algorithm| +|32005 |APAX |Samplify’s APAX Numerical Encoding Technology| +|32006 |CBF |All imgCIF/CBF compressions and decompressions, including Canonical, Packed, Packed Version 2, Byte Offset and Nibble Offset| +|32007 |JPEG-XR |Enables images to be compressed/decompressed with JPEG-XR compression| +|32008 |bitshuffle |Extreme version of shuffle filter that shuffles data at bit level instead of byte level| +|32009 |SPDP |SPDP fast lossless compression algorithm for single- and double-precision floating-point data| +|32010 |LPC-Rice |LPC-Rice multi-threaded lossless compression| +|32011 |CCSDS-123 |ESA CCSDS-123 multi-threaded compression filter| +|32012 |JPEG-LS |CharLS JPEG-LS multi-threaded compression 
filter| +|32013 |zfp |Lossy & lossless compression of floating point and integer datasets to meet rate, accuracy, and/or precision targets.| +|32014 |fpzip |Fast and Efficient Lossy or Lossless Compressor for Floating-Point Data| +|32015 |Zstandard |Real-time compression algorithm with wide range of compression / speed trade-off and fast decoder| +|32016 |B³D |GPU based image compression method developed for light-microscopy applications| +|32017 |SZ |An error-bounded lossy compressor for scientific floating-point data| +|32018 |FCIDECOMP |EUMETSAT CharLS compression filter for use with netCDF| +|32019 |JPEG |Jpeg compression filter| +|32020 |VBZ |Compression filter for raw dna signal data used by Oxford Nanopore| +|32021 |FAPEC | Versatile and efficient data compressor supporting many kinds of data and using an outlier-resilient entropy coder| +|32022 |BitGroom |The BitGroom quantization algorithm| +|32023 |Granular |BitRound (GBR) The GBG quantization algorithm is a significant improvement to the BitGroom filter| +|32024 |SZ3 |A modular error-bounded lossy compression framework for scientific datasets| +|32025 |Delta-Rice |Lossless compression algorithm optimized for digitized analog signals based on delta encoding and rice coding| +|32026 |BLOSC |The recent new-generation version of the Blosc compression library| +|32027 |FLAC |FLAC audio compression filter in HDF5| +|32028 |H5Z-SPERR |H5Z-SPERR is the HDF5 filter for SPERR| + + +##
The Filters
+
+### LZO Filter
+
+#### Filter ID: 305
+
+#### Filter Description:
+LZO is a portable lossless data compression library written in ANSI C.
+Reliable and thoroughly tested. High adoption - each second terabytes of data are compressed by LZO. No bugs since the first release back in 1996.
+Offers pretty fast compression and *extremely* fast decompression.
+Includes slower compression levels achieving a quite competitive compression ratio while still decompressing at this very high speed.
+Distributed under the terms of the GNU General Public License (GPL v2+). Commercial licenses are available on request.
+Military-grade stability and robustness.
+
+#### Filter Information:
+http://www.oberhumer.com/opensource/lzo/
+http://www.pytables.org
+
+#### Contact Information:
+Francesc Alted
+Email: faltet at pytables dot org
+
+##
+
+

+### BZIP2 Filter
+ +#### Filter ID: 307 + +#### Filter Description: +bzip2 is a freely available, patent free, high-quality data compressor. It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression. + +#### Filter Information: +http://www.bzip.org +http://www.pytables.org + +#### Contact Information: +Francesc Alted +Email: faltet at pytables dot org + +## + +

+### LZF Filter
+ +#### Filter ID: 32000 + +#### Filter Description: +The LZF filter is an alternative DEFLATE-style compressor for HDF5 datasets, using the free LZF library by Marc Alexander Lehmann. Its main benefit over the built-in HDF5 DEFLATE filter is speed; in memory-to-memory operation as part of the filter pipeline, it typically compresses 3x-5x faster than DEFLATE, and decompresses 2x faster, while maintaining 50% to 90% of the DEFLATE compression ratio. + +LZF can be used to compress any data type, and requires no compile-time or run-time configuration. HDF5 versions 1.6.5 through 1.8.3 are supported. The filter is written in C and can be included directly in C or C++ applications; it has no external dependencies. The license is 3-clause BSD (virtually unrestricted, including commercial applications). + +More information, downloads, and benchmarks, are available at the http://h5py.org/lzf/. + +Additional Information: + +The LZF filter was developed as part of the h5py project, which implements a general-purpose interface to HDF5 from Python. + +#### Filter Information: + +The h5py homepage: http://h5py.org + +The LZF library homepage: http://home.schmorp.de/marc/liblzf.html + +#### Contact Information: + +Andrew Collette +Web: http://h5py.org + +## + +

+### Blosc Filter
+ +#### Filter ID: 32001 + +#### Filter Description: +Blosc is a high performance compressor optimized for binary data. It has been designed to compress data very fast, at the expense of achieving lesser compression ratios than, say, zlib+shuffle. It is mainly meant to not introduce a significant delay when dealing with data that is stored in high-performance I/O systems (like large RAID cabinets, or even the OS filesystem memory cache). + +It uses advanced cache-efficient techniques to reduce activity on the memory bus as much as possible. It also leverages SIMD (SSE2) and multi-threading capabilities present in nowadays multi-core processors so as to accelerate the compression/decompression process to a maximum. + +#### Filter Information: + +http://blosc.org/ +http://www.pytables.org + +#### Contact Information: + +Francesc Alted +Email: faltet at pytables dot org + +## + +

+### MAFISC Filter
+ +#### Filter ID: 32002 + +#### Filter Description: +This compressing filter exploits the multidimensionality and smoothness characterizing many scientific data sets. It adaptively applies some filters to preprocess the data and uses lzma as the actual compression step. It significantly outperforms pure lzma compression on most datasets. + +The software is currently under a rather unrestrictive two clause BSD style license. + +#### Filter Information: + +http://wr.informatik.uni-hamburg.de/research/projects/icomex/mafisc + +#### Contact Information: + +Nathanael Huebbe +Email: nathanael.huebbe at informatik dot uni-hamburg dot de + +## + +

+### Snappy Filter
+ +#### Filter ID: 32003 + +#### Filter Description: +Snappy-CUDA is a compression/decompression library that leverages GPU processing power to compress/decompress data. The Snappy compression algorithm does not aim for maximum compression or compatibility with any other compression library; instead, it aims for very high speeds and reasonable compression. For instance, compared to the fastest mode of zlib, the reference implementation of Snappy on the CPU is an order of magnitude faster for most inputs, but the resulting compressed files are anywhere from 20% to 100% bigger. + +#### Filter Information: + +https://github.com/lucasvr/snappy-cuda +https://github.com/google/snappy + +#### Contact Information: + +Lucas C. Villa Real +Email: lucasvr at gmail dot com + +## + +

+### LZ4 Filter
+ +#### Filter ID: 32004 + +#### Filter Description: +LZ4 is a very fast lossless compression algorithm, providing compression speed at 300 MB/s per core, scalable with multi-cores CPU. It also features an extremely fast decoder, with speeds up and beyond 1GB/s per core, typically reaching RAM speed limits on multi-core systems. For a format description of the LZ4 compression filter in HDF5, see HDF5_LZ4.pdf. + +#### Filter Information: + +LZ4 Algorithm: https://github.com/nexusformat/HDF5-External-Filter-Plugins/tree/master/LZ4 + +LZ4 Code: + +Although the LZ4 software is not supported by The HDF Group, it is included in The HDF Group SVN repository so that it can be tested regularly with HDF5. For convenience, users can obtain it from SVN with the following command: + svn checkout https://svn.hdfgroup.org/hdf5_plugins/trunk/LZ4 LZ4 + +#### Contact Information: + +Michael Rissi (Dectris Ltd.) +Email: michael dot rissi at dectris dot com + +## + +

+### APAX
+ +#### Filter ID: 32005 + +Appears to be no longer available + +## + +

+### CBF
+
+#### Filter ID: 32006
+
+#### Filter Description:
+All imgCIF/CBF compressions and decompressions, including Canonical, Packed, Packed Version 2, Byte Offset and Nibble Offset.
+License Information: GPL and LGPL
+
+#### Contact Information:
+
+Herbert J. Bernstein
+Email: yayahjb at gmail dot com
+
+##
+
+

+### JPEG-XR
+ +#### Filter ID: 32007 + +#### Filter Description: +Filter that allows HDF5 image datasets to be compressed or decompressed using the JPEG-XR compression method. + +#### Filter Information: + +JPEG-XR Compression Method +JPEG-XR Filter for HDF5 + +#### Contact Information: + +Marvin Albert +Email: marvin dot albert at gmail dot com + +## + +

+### bitshuffle
+ +#### Filter ID: 32008 + +#### Filter Description: +This filter shuffles data at the bit level to improve compression. CHIME uses this filter for data acquisition. + +#### Filter Information: + +bitshuffle +CHIME + +#### Contact Information: + +Kiyoshi Masui +Email: kiyo at physics dot ubc dot ca + +## + +

+### SPDP
+ +#### Filter ID: 32009 + +#### Filter Description: +SPDP is a fast, lossless, unified compression/decompression algorithm designed for both 32-bit single-precision (float) and 64-bit double-precision (double) floating-point data. It also works on other data. + +#### Filter Information: + +http://cs.txstate.edu/~burtscher/research/SPDP/ + +#### Contact Information: + +Martin Burtscher +Email: burtscher at txstate dot edu + +## + +

+### LPC-Rice
+ +#### Filter ID: 32010 + +#### Filter Description: +LPC-Rice is a fast lossless compression codec that employs Linear Predictive Coding together with Rice coding. It supports multi-threading and SSE2 vector instructions, enabling it to exceed compression and decompression speeds of 1 GB/s. + +#### Filter Information: + +https://sourceforge.net/projects/lpcrice/ + +#### Contact Information: + +Frans van den Bergh +Email: fvdbergh at csir dot co dot za + +Derick Swanepoel +Email: dswanepoel at gmail dot com + +## + +

+### CCSDS-123
+ +#### Filter ID: 32011 + +#### Filter Description: +CCSDS-123 is a multi-threaded HDF5 compression filter using the ESA CCSDS-123 implementation. + +#### Filter Information: + +https://sourceforge.net/projects/ccsds123-hdf-filter/ + +#### Contact Information: + +Frans van den Bergh +Email: fvdbergh at csir dot co dot za + +Derick Swanepoel +Email: dswanepoel at gmail dot com + +## + +

+### JPEG-LS
+ +#### Filter ID: 32012 + +#### Filter Description: +JPEG-LS is a multi-threaded HDF5 compression filter using the CharLS JPEG-LS implementation. + +#### Filter Information: + +https://sourceforge.net/projects/jpegls-hdf-filter/ + +#### Contact Information: + +Frans van den Bergh +Email: fvdbergh at csir dot co dot za + +Derick Swanepoel +Email: dswanepoel at gmail dot com + +## + +

+### zfp
+ +#### Filter ID: 32013 + +#### Filter Description: +zfp is a BSD licensed open source C++ library for compressed floating-point arrays that support very high throughput read and write random access. zfp was designed to achieve high compression ratios and therefore uses lossy but optionally error-bounded compression. Although bit-for-bit lossless compression is not always possible, zfp is usually accurate to within machine epsilon in near-lossless mode, and is often orders of magnitude more accurate and faster than other lossy compressors. + +#### Filter Information: + +https://github.com/LLNL/H5Z-ZFP + +For more information see: http://computation.llnl.gov/projects/floating-point-compression/ + +#### Contact Information: + +Mark Miller +Email: miller86 at llnl dot gov + +Peter Lindstrom +Email: pl at llnl dot gov + +## + +

+### fpzip
+ +#### Filter ID: 32014 + +#### Filter Description: +fpzip is a library for lossless or lossy compression of 2D or 3D floating-point scalar fields. Although written in C++, fpzip has a C interface. fpzip was developed by Peter Lindstrom at LLNL. + +#### Filter Information: + +For more information see: http://computation.llnl.gov/projects/floating-point-compression/ + +#### Contact Information: + +Peter Lindstrom +Email: pl at llnl dot gov + +## + +

+### Zstandard
+ +#### Filter ID: 32015 + +#### Filter Description: +Zstandard is a real-time compression algorithm, providing high compression ratios. It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder. The Zstandard library is provided as open source software using a BSD license. + +#### Filter Information: + +https://github.com/aparamon/HDF5Plugin-Zstandard + +#### Contact Information: + +Andrey Paramonov +Email: paramon at acdlabs dot ru + +## + +

+### B³D
+
+#### Filter ID: 32016
+
+#### Filter Description:
+B³D is a fast (~1 GB/s), GPU based image compression method, developed for light-microscopy applications. Alongside lossless compression, it offers a noise dependent lossy compression mode, where the loss can be tuned as a proportion of the inherent image noise (accounting for photon shot noise and camera read noise). It not only allows for fast compression during imaging, but can achieve compression ratios up to 100.
+
+[Information](http://www.biorxiv.org/content/early/2017/07/21/164624)
+
+##
+
+

+### SZ
+ +#### Filter ID: 32017 + +#### Filter Description: +SZ is a fast and efficient error-bounded lossy compressor for floating-point data. It was developed for scientific applications producing large-scale HPC data sets. SZ supports C, Fortran, and Java and has been tested on Linux and Mac OS X. + +#### Filter Information: + +[Information](https://collab.cels.anl.gov/display/ESR/SZ) +[github](https://github.com/disheng222/SZ) +[License](https://www.mcs.anl.gov/~shdi/download/sz-download.html) + +#### Contact Information: + +Sheng Di +Email: sdi1 at anl dot gov + +Franck Cappello +Email: cappello at mcs dot anl dot gov + +## + +

+### FCIDECOMP
+ +#### Filter ID: 32018 + +#### Filter Description: +FCIDECOMP is a third-party compression filter used at EUMETSAT for the compression of netCDF-4 files. It is a codec implementing JPEG-LS using CharLS used for satellite imagery. + +#### Filter Information: + +All software and documentation can be found at this link: + +ftp://ftp.eumetsat.int/pub/OPS/out/test-data/Test-data-for-External-Users/MTG_FCI_L1c_Compressed-Datasets_and_Decompression-Plugin_April2017/Decompression_Plugin/ + +#### Contact Information: + +Dr. Daniel Lee +Email: daniel dot lee at eumetsat dot int + +## + +

+### JPEG
+ +#### Filter ID: 32019 + +#### Filter Description: +This is a lossy compression filter. It provides a user-specified "quality factor" to control the trade-off of size versus accuracy. + +#### Filter Information: + +Information +Github +License + +libjpeg: This library is available as a package for most Linux distributions, and source code is available from https://www.ijg.org/. + +Restrictions: + +Only 8-bit unsigned data arrays are supported. +Arrays must be either: + 2-D monochromatic [NumColumns, NumRows] + 3-D RGB [3, NumColumns, NumRows] +Chunking must be set to the size of one entire image so the filter is called once for each image. +Using the JPEG filter in your application: + +HDF5 only supports compression for "chunked" datasets; this just means that you need to call H5Pset_chunk to specify a chunk size. The chunking must be set to the size of a single image for the JPEG filter to work properly. + +When calling H5Pset_filter for compression it must be called with cd_nelmts=4 and cd_values as follows: + + cd_values[0] = quality factor (1-100) + + cd_values[1] = numColumns + + cd_values[2] = numRows + + cd_values[3] = 0=Mono, 1=RGB + +Common h5repack parameter: UD=32019,0,4,q,c,r,t + +#### Contact Information: + +Mark Rivers , University of Chicago (rivers at cars.uchicago.edu) + +## + +
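To make the parameter layout above concrete, here is a minimal C sketch of the call sequence this section describes: chunk the dataset to one full image and pass the four cd_values to H5Pset_filter. The dataset name, dimensions, quality factor, and helper-function name are illustrative assumptions, not part of the registry entry.

```c
#include "hdf5.h"

/* Illustrative sketch: write one 8-bit monochrome image through the
 * registered JPEG filter (ID 32019).  Dimensions, dataset name and the
 * quality factor are made-up example values. */
int write_jpeg_image(hid_t file_id, const unsigned char *pixels)
{
    const hsize_t dims[2] = {512, 640};   /* [NumColumns, NumRows] as described above */
    unsigned cd_values[4];
    cd_values[0] = 75;                    /* quality factor (1-100) */
    cd_values[1] = 512;                   /* numColumns */
    cd_values[2] = 640;                   /* numRows */
    cd_values[3] = 0;                     /* 0 = Mono, 1 = RGB */

    hid_t space = H5Screate_simple(2, dims, NULL);
    hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, dims);          /* chunk size = one entire image */
    /* OPTIONAL flag: the dataset is still written if the plugin is missing */
    H5Pset_filter(dcpl, 32019, H5Z_FLAG_OPTIONAL, 4, cd_values);

    hid_t dset = H5Dcreate2(file_id, "image", H5T_NATIVE_UCHAR, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);
    herr_t status = H5Dwrite(dset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL,
                             H5P_DEFAULT, pixels);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    return (int)status;
}
```

On the command line, the h5repack parameter UD=32019,0,4,q,c,r,t quoted above requests the same filter with the same four values.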

+### VBZ
+ +#### Filter ID: 32020 + +#### Filter Description: +This filter is used by Oxford Nanopore specifically to compress raw dna signal data (signed integer). To achieve this it uses both: + +streamvbyte (https://github.com/lemire/streamvbyte) + +zstd (https://github.com/facebook/zstd) + +#### Contact Information: + +George Pimm + +## + +

+### FAPEC
+ +#### Filter ID: 32021 + +#### Filter Description: + +FAPEC is a versatile and efficient data compressor, initially designed for satellite payloads but later extended for ground applications. It relies on an outlier-resilient entropy coding core with similar ratios and speeds than CCSDS 121.0 (adaptive Rice). + +FAPEC has a large variety of pre-processing stages and options: images (greyscale, colour, hyperspectral); time series or waveforms (including interleaving, e.g. for multidimensional or interleaved time series or tabular data); floating point (single+double precision); text (including LZW compression and our faster FAPECLZ); tabulated text (CSV); genomics (FastQ); geophysics (Kongsberg's water column datagrams); etc. + +Most stages support samples of 8 to 24 bits (big/little endian, signed/unsigned), and lossless/lossy options. It can be extended with new, tailored pre-processing stages. It includes encryption options (AES-256 based on OpenSSL, and our own XXTEA implementation). + +The FAPEC library and CLI runs on Linux, Windows and Mac. The HDF5 user must request and install the library separately, thus allowing to upgrade it without requiring changes in your HDF5 code. + +#### Filter Information: + +https://www.dapcom.es/fapec/ +https://www.dapcom.es/get-fapec/ +https://www.dapcom.es/resources/FAPEC_EndUserLicenseAgreement.pdf + +#### Contact Information: + +Jordi Portell i de Mora (DAPCOM Data Services S.L.) + +fapec at dapcom dot es + +## + +

+### BitGroom
+ +#### Filter ID: 32022 + +#### Filter Description: + +The BitGroom quantization algorithm is documented in: + +Zender, C. S. (2016), Bit Grooming: Statistically accurate precision-preserving quantization with compression, evaluated in the netCDF Operators (NCO, v4.4.8+), Geosci. Model Dev., 9, 3199-3211, doi:10.5194/gmd-9-3199-2016. + +#### Filter Information: + +The filter is documented and maintained in the Community Codec Repository (https://github.com/ccr/ccr). + +#### Contact Information: + +Charlie Zender (University of California, Irvine) + +## + +

+### Granular BitRound (GBR)
+
+#### Filter ID: 32023
+
+#### Filter Description:
+
+The GBG quantization algorithm is a significant improvement to the BitGroom filter documented in:
+
+Zender, C. S. (2016), Bit Grooming: Statistically accurate precision-preserving quantization with compression, evaluated in the netCDF Operators (NCO, v4.4.8+), Geosci. Model Dev., 9, 3199-3211, doi:10.5194/gmd-9-3199-2016.
+
+#### Filter Information:
+
+This filter is documented, implemented, and maintained in the Community Codec Repository (https://github.com/ccr/ccr).
+
+#### Contact Information:
+
+Charlie Zender (University of California, Irvine)
+
+##
+
+

+### SZ3
+ +#### Filter ID: 32024 + +#### Filter Description: + +SZ3 is a modular error-bounded lossy compression framework for scientific datasets, which allows users to customize their own compression pipeline to adapt to diverse datasets and user-requirements. Compared with SZ2 (filter id: 32017), SZ3 has integrated a more effective prediction such that its compression qualities/ratios are much higher than that of SZ2 in most of cases. + +#### Filter Information: + +This filter is documented, implemented, and maintained in github: https://github.com/szcompressor/SZ3. + +License: https://github.com/szcompressor/SZ/blob/master/copyright-and-BSD-license.txt + +#### Contact Information: + +Sheng Di +Email: sdi1 at anl dot gov + +Franck Cappello +Email: cappello at mcs dot anl dot gov + +## + +

+### Delta-Rice
+ +#### Filter ID: 32025 + +#### Filter Description: + +Lossless compression algorithm optimized for digitized analog signals based on delta encoding and rice coding. + +#### Filter Information: + +This filter is documented, implemented, and maintained at: https://gitlab.com/dgma224/deltarice. + +#### Contact Information: + +David Mathews +Email: david dot mathews dot 1994 at gmail dot com + +## + +

+### Blosc2 Filter
+ +#### Filter ID: 32026 + +#### Filter Description: + +Blosc is a high performance compressor optimized for binary data (i.e. floating point numbers, integers and booleans). It has been designed to transmit data to the processor cache faster than the traditional, non-compressed, direct memory fetch approach via a memcpy() OS call. Blosc main goal is not just to reduce the size of large datasets on-disk or in-memory, but also to accelerate memory-bound computations. + +C-Blosc2 is the new major version of C-Blosc, and tries hard to be backward compatible with both the C-Blosc1 API and its in-memory format. + +#### Filter Information: + +Blosc project: https://www.blosc.org + +C-Blosc2 docs: https://www.blosc.org/c-blosc2/c-blosc2.html + +License: https://github.com/Blosc/c-blosc2/blob/main/LICENSE.txt + +#### Contact Information: + +Francesc Alted +Email: faltet at gmail dot org (BDFL for the Blosc project) + +## + +

+### FLAC Filter
+ +#### Filter ID: 32027 + +#### Filter Description: + +FLAC is an audio compression filter in HDF5. (Our ultimate goal is to use it via h5py in the hdf5plugin library: https://github.com/silx-kit/hdf5plugin). + +#### Filter Information: + +The FLAC filter is open source: https://github.com/xiph/flac + +libFLAC has BSD-like license: https://github.com/xiph/flac/blob/master/CONTRIBUTING.md + +#### Contact Information: + +Laurie Stephey +Email: lastephey at lbl dot gov + +## + +

+### H5Z-SPERR Filter
+ +#### Filter ID: 32028 + +#### Filter Description: + +SPERR is a wavelet-based lossy compressor for floating-point scientific data; it achieves one of the best compression ratios given a user-prescribed error tolerance (i.e., maximum point-wise error). SPERR also supports two distinctive decoding modes, namely "flexible-rate decoding" and "multi-resolution decoding," that facilitate data analysis with various constraints. More details are available on SPERR Github repository: https://github.com/NCAR/SPERR. + +#### Filter Information: + +H5Z-SPERR is the HDF5 filter for SPERR. It's also available on Github: https://github.com/NCAR/H5Z-SPERR + +#### Contact Information: +Samuel Li +Email: shaomeng at ucar dot edu +
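For filters in this registry that take no client data, the generic dynamic-plugin pattern is the same regardless of identifier. The C sketch below is a hedged illustration only: the function name is hypothetical, the Zstandard ID (32015) is used purely as an example, and filter-specific cd_values, where required, are documented in the individual sections above.

```c
#include "hdf5.h"
#include <stdio.h>

/* Generic sketch: request a registered third-party filter on a dataset
 * creation property list by its identifier from the table above. */
int request_registered_filter(hid_t dcpl_id, H5Z_filter_t filter_id)
{
    /* Reports whether the filter is already registered with the library;
     * plugins found on HDF5_PLUGIN_PATH may only be resolved later, when
     * data is actually written or read. */
    if (H5Zfilter_avail(filter_id) <= 0)
        fprintf(stderr, "note: filter %d not yet registered\n", (int)filter_id);

    /* H5Z_FLAG_OPTIONAL lets the write proceed even if the plugin cannot
     * be loaded at write time; no client data is passed here. */
    return H5Pset_filter(dcpl_id, filter_id, H5Z_FLAG_OPTIONAL, 0, NULL) < 0 ? -1 : 0;
}
```

A chunked dataset created with such a property list is then routed through the corresponding plugin, for example `request_registered_filter(dcpl, 32015)` for Zstandard, provided the shared library is on HDF5_PLUGIN_PATH.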